1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
31 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
33 #define IPV6_ADDR_LEN 16
36 #define I40E_FDIR_PKT_LEN 512
37 #define I40E_FDIR_IP_DEFAULT_LEN 420
38 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
39 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
40 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
41 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
43 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
44 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
45 #define I40E_FDIR_UDP_DEFAULT_LEN 400
46 #define I40E_FDIR_GTP_DEFAULT_LEN 384
47 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
48 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
50 #define I40E_FDIR_GTPC_DST_PORT 2123
51 #define I40E_FDIR_GTPU_DST_PORT 2152
52 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
53 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
54 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
55 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
57 /* Wait time for fdir filter programming */
58 #define I40E_FDIR_MAX_WAIT_US 10000
60 /* Wait count and interval for fdir filter flush */
61 #define I40E_FDIR_FLUSH_RETRY 50
62 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
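/* Worst-case flush wait: I40E_FDIR_FLUSH_RETRY * I40E_FDIR_FLUSH_INTERVAL_MS = 250 ms. */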
64 #define I40E_COUNTER_PF 2
65 /* Statistics counter index for one PF */
66 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
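/*
 * Each PF owns I40E_COUNTER_PF consecutive statistic counters and its FDIR
 * match counter is the first of them, e.g. pf_id 3 maps to counter index 6.
 */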
68 #define I40E_FDIR_FLOWS ( \
69 (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
70 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
71 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
72 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
73 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
74 (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
75 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
76 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
77 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
78 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
79 (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
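/*
 * Bitmap of flow types handled by the flow director; reported to
 * applications via flow_types_mask[] in i40e_fdir_info_get().
 */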
81 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
82 enum i40e_filter_pctype pctype,
83 const struct rte_eth_fdir_filter *filter,
85 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
86 struct i40e_fdir_filter *filter);
87 static struct i40e_fdir_filter *
88 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
89 const struct i40e_fdir_input *input);
90 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
91 struct i40e_fdir_filter *filter);
93 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
94 enum i40e_filter_pctype pctype,
95 const struct i40e_fdir_filter_conf *filter,
99 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
101 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
102 struct i40e_hmc_obj_rxq rx_ctx;
103 int err = I40E_SUCCESS;
105 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
106 /* Init the RX queue in hardware */
107 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
109 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
110 rx_ctx.qlen = rxq->nb_rx_desc;
111 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
114 rx_ctx.dtype = i40e_header_split_none;
115 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
116 rx_ctx.rxmax = ETHER_MAX_LEN;
117 rx_ctx.tphrdesc_ena = 1;
118 rx_ctx.tphwdesc_ena = 1;
119 rx_ctx.tphdata_ena = 1;
120 rx_ctx.tphhead_ena = 1;
121 rx_ctx.lrxqthresh = 2;
127 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
128 if (err != I40E_SUCCESS) {
129 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
132 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
133 if (err != I40E_SUCCESS) {
134 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
137 rxq->qrx_tail = hw->hw_addr +
138 I40E_QRX_TAIL(rxq->vsi->base_queue);
141 /* Init the RX tail register. */
142 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
148 * i40e_fdir_setup - reserve and initialize the Flow Director resources
149 * @pf: board private structure
152 i40e_fdir_setup(struct i40e_pf *pf)
154 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
155 struct i40e_vsi *vsi;
156 int err = I40E_SUCCESS;
157 char z_name[RTE_MEMZONE_NAMESIZE];
158 const struct rte_memzone *mz = NULL;
159 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
161 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
162 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
163 return I40E_NOT_SUPPORTED;
166 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
167 " num_filters_best_effort = %u.",
168 hw->func_caps.fd_filters_guaranteed,
169 hw->func_caps.fd_filters_best_effort);
171 vsi = pf->fdir.fdir_vsi;
173 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
176 /* make new FDIR VSI */
177 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
179 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
180 return I40E_ERR_NO_AVAILABLE_VSI;
182 pf->fdir.fdir_vsi = vsi;
184 /* FDIR TX queue setup */
185 err = i40e_fdir_setup_tx_resources(pf);
187 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
191 /* FDIR RX queue setup */
192 err = i40e_fdir_setup_rx_resources(pf);
194 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
198 err = i40e_tx_queue_init(pf->fdir.txq);
200 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
204 /* needs to be switched on before dev start */
205 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
207 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
211 /* Init the rx queue in hardware */
212 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
214 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
218 /* switch on rx queue */
219 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
221 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
225 /* reserve memory for the fdir programming packet */
226 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
227 eth_dev->device->driver->name,
229 eth_dev->data->port_id);
230 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
232 PMD_DRV_LOG(ERR, "Cannot init memzone for "
233 "flow director program packet.");
234 err = I40E_ERR_NO_MEMORY;
237 pf->fdir.prg_pkt = mz->addr;
238 pf->fdir.dma_addr = mz->iova;
240 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
241 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
246 i40e_dev_rx_queue_release(pf->fdir.rxq);
249 i40e_dev_tx_queue_release(pf->fdir.txq);
252 i40e_vsi_release(vsi);
253 pf->fdir.fdir_vsi = NULL;
258 * i40e_fdir_teardown - release the Flow Director resources
259 * @pf: board private structure
262 i40e_fdir_teardown(struct i40e_pf *pf)
264 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
265 struct i40e_vsi *vsi;
267 vsi = pf->fdir.fdir_vsi;
270 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
272 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
273 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
275 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
276 i40e_dev_rx_queue_release(pf->fdir.rxq);
278 i40e_dev_tx_queue_release(pf->fdir.txq);
280 i40e_vsi_release(vsi);
281 pf->fdir.fdir_vsi = NULL;
284 /* check whether the flow director table is empty */
286 i40e_fdir_empty(struct i40e_hw *hw)
288 uint32_t guarant_cnt, best_cnt;
290 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
291 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
292 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
293 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
294 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
295 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
296 if (best_cnt + guarant_cnt > 0)
303 * Initialize the configuration for the byte stream extracted as flexible payload
307 i40e_init_flx_pld(struct i40e_pf *pf)
309 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
315 * Define the byte stream extracted as flexible payload in the
316 * field vector. By default, select 8 words from the beginning
317 * of the payload as flexible payload.
319 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
320 index = i * I40E_MAX_FLXPLD_FIED;
321 pf->fdir.flex_set[index].src_offset = 0;
322 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
323 pf->fdir.flex_set[index].dst_offset = 0;
324 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
326 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* unused */
328 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* unused */
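/*
 * The defaults programmed above select the first 8 payload words through
 * the first FLX_PIT entry of each layer and leave the remaining two
 * entries unused, matching the flex_set values recorded just before.
 */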
331 /* initialize the masks */
332 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
333 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
334 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
336 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
338 pf->fdir.flex_mask[pctype].word_mask = 0;
339 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
340 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
341 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
342 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
343 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
348 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
349 if ((flex_pit2).src_offset < \
350 (flex_pit1).src_offset + (flex_pit1).size) { \
351 PMD_DRV_LOG(ERR, "src_offset should not be" \
352 " less than the previous offset" \
353 " + previous FSIZE."); \
359 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit structures;
360 * the resulting flex_pit entries are sorted by their src_offset values
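 * For example, if the first five entries of src_offset are {4, 5, 6, 10, 11},
 * they are grouped into {src_offset 4, size 3, dst_offset 0} and
 * {src_offset 10, size 2, dst_offset 3}; the remaining entries are handled
 * the same way.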
362 static inline uint16_t
363 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
364 struct i40e_fdir_flex_pit *flex_pit)
366 uint16_t src_tmp, size, num = 0;
367 uint16_t i, k, j = 0;
369 while (j < I40E_FDIR_MAX_FLEX_LEN) {
371 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
372 if (src_offset[j + 1] == src_offset[j] + 1)
377 src_tmp = src_offset[j] + 1 - size;
378 /* the flex_pit entries need to be sorted by src_offset */
379 for (i = 0; i < num; i++) {
380 if (src_tmp < flex_pit[i].src_offset)
383 /* if an insert is required, shift the later entries back */
384 for (k = num; k > i; k--)
385 flex_pit[k] = flex_pit[k - 1];
387 flex_pit[i].dst_offset = j + 1 - size;
388 flex_pit[i].src_offset = src_tmp;
389 flex_pit[i].size = size;
396 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
398 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
400 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
403 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
404 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
405 PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
410 memset(flex_pit, 0, sizeof(flex_pit));
411 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
412 if (num > I40E_MAX_FLXPLD_FIED) {
413 PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
416 for (i = 0; i < num; i++) {
417 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
418 flex_pit[i].src_offset & 0x01) {
419 PMD_DRV_LOG(ERR, "flexpayload should be measured"
424 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
430 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
431 * arguments are valid
434 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
435 const struct rte_eth_fdir_flex_conf *conf)
437 const struct rte_eth_flex_payload_cfg *flex_cfg;
438 const struct rte_eth_fdir_flex_mask *flex_mask;
443 enum i40e_filter_pctype pctype;
446 PMD_DRV_LOG(INFO, "NULL pointer.");
449 /* check flexible payload setting configuration */
450 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
451 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
454 for (i = 0; i < conf->nb_payloads; i++) {
455 flex_cfg = &conf->flex_set[i];
456 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
457 PMD_DRV_LOG(ERR, "invalid payload type.");
460 ret = i40e_check_fdir_flex_payload(flex_cfg);
462 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
467 /* check flex mask setting configuration */
468 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
469 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
472 for (i = 0; i < conf->nb_flexmasks; i++) {
473 flex_mask = &conf->flex_mask[i];
474 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
475 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
476 PMD_DRV_LOG(WARNING, "invalid flow type.");
480 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
481 mask_tmp = I40E_WORD(flex_mask->mask[j],
482 flex_mask->mask[j + 1]);
483 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
485 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
486 PMD_DRV_LOG(ERR, "exceeds maximal"
487 " number of bitmasks.");
497 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
498 * @pf: board private structure
499 * @cfg: the rule describing how the byte stream is extracted as flexible payload
502 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
503 const struct rte_eth_flex_payload_cfg *cfg)
505 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
506 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
507 uint32_t flx_pit, flx_ort;
508 uint16_t num, min_next_off; /* in words */
509 uint8_t field_idx = 0;
510 uint8_t layer_idx = 0;
513 if (cfg->type == RTE_ETH_L2_PAYLOAD)
514 layer_idx = I40E_FLXPLD_L2_IDX;
515 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
516 layer_idx = I40E_FLXPLD_L3_IDX;
517 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
518 layer_idx = I40E_FLXPLD_L4_IDX;
520 memset(flex_pit, 0, sizeof(flex_pit));
521 num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
525 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
526 (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
527 (layer_idx * I40E_MAX_FLXPLD_FIED);
528 I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
529 i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
532 for (i = 0; i < num; i++) {
533 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
534 /* record the info in fdir structure */
535 pf->fdir.flex_set[field_idx].src_offset =
536 flex_pit[i].src_offset / sizeof(uint16_t);
537 pf->fdir.flex_set[field_idx].size =
538 flex_pit[i].size / sizeof(uint16_t);
539 pf->fdir.flex_set[field_idx].dst_offset =
540 flex_pit[i].dst_offset / sizeof(uint16_t);
541 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
542 pf->fdir.flex_set[field_idx].size,
543 pf->fdir.flex_set[field_idx].dst_offset);
545 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
547 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
548 pf->fdir.flex_set[field_idx].size;
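/*
 * The unused FLX_PIT entries of this layer must still be programmed;
 * start them right after the last used field so the source offsets stay
 * ordered and non-overlapping.
 */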
550 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
551 /* program the unused registers while obeying the register constraints */
552 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
553 NONUSE_FLX_PIT_DEST_OFF);
555 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
562 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
563 * @pf: board private structure
564 * @pctype: packet classification type
565 * @mask_cfg: mask for flexible payload
568 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
569 enum i40e_filter_pctype pctype,
570 const struct rte_eth_fdir_flex_mask *mask_cfg)
572 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
573 struct i40e_fdir_flex_mask *flex_mask;
574 uint32_t flxinset, fd_mask;
576 uint8_t i, nb_bitmask = 0;
578 flex_mask = &pf->fdir.flex_mask[pctype];
579 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
580 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
581 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
582 if (mask_tmp != 0x0) {
583 flex_mask->word_mask |=
584 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
585 if (mask_tmp != UINT16_MAX) {
587 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
588 flex_mask->bitmask[nb_bitmask].offset =
589 i / sizeof(uint16_t);
594 /* write mask to hw */
595 flxinset = (flex_mask->word_mask <<
596 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
597 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
598 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
600 for (i = 0; i < nb_bitmask; i++) {
601 fd_mask = (flex_mask->bitmask[i].mask <<
602 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
603 I40E_PRTQF_FD_MSK_MASK_MASK;
604 fd_mask |= ((flex_mask->bitmask[i].offset +
605 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
606 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
607 I40E_PRTQF_FD_MSK_OFFSET_MASK;
608 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
613 * Configure flow director related settings
616 i40e_fdir_configure(struct rte_eth_dev *dev)
618 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
619 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
620 struct rte_eth_fdir_flex_conf *conf;
621 enum i40e_filter_pctype pctype;
627 * configuration needs to be done before
628 * flow director filters are added.
629 * If filters exist, flush them.
631 if (i40e_fdir_empty(hw) < 0) {
632 ret = i40e_fdir_flush(dev);
634 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
639 /* enable FDIR filter */
640 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
641 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
642 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
644 i40e_init_flx_pld(pf); /* set flex config to default value */
646 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
647 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
649 PMD_DRV_LOG(ERR, "invalid configuration arguments.");
652 /* configure flex payload */
653 for (i = 0; i < conf->nb_payloads; i++)
654 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
655 /* configure flex mask */
656 for (i = 0; i < conf->nb_flexmasks; i++) {
657 if (hw->mac.type == I40E_MAC_X722) {
658 /* get translated pctype value in fd pctype register */
659 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
660 hw, I40E_GLQF_FD_PCTYPES(
661 (int)i40e_flowtype_to_pctype(pf->adapter,
662 conf->flex_mask[i].flow_type)));
664 pctype = i40e_flowtype_to_pctype(pf->adapter,
665 conf->flex_mask[i].flow_type);
667 i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
674 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
675 unsigned char *raw_pkt,
678 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
679 uint16_t *ether_type;
680 uint8_t len = 2 * sizeof(struct ether_addr);
682 struct ipv6_hdr *ip6;
683 static const uint8_t next_proto[] = {
684 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
685 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
686 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
687 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
688 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
689 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
690 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
691 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
692 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
693 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
696 raw_pkt += 2 * sizeof(struct ether_addr);
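/*
 * The destination and source MAC addresses at the start of the template
 * are left zeroed (the caller cleared the buffer); only the optional VLAN
 * tag and the EtherType are filled in below.
 */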
697 if (vlan && fdir_input->flow_ext.vlan_tci) {
698 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
699 rte_memcpy(raw_pkt + sizeof(uint16_t),
700 &fdir_input->flow_ext.vlan_tci,
702 raw_pkt += sizeof(vlan_frame);
703 len += sizeof(vlan_frame);
705 ether_type = (uint16_t *)raw_pkt;
706 raw_pkt += sizeof(uint16_t);
707 len += sizeof(uint16_t);
709 switch (fdir_input->flow_type) {
710 case RTE_ETH_FLOW_L2_PAYLOAD:
711 *ether_type = fdir_input->flow.l2_flow.ether_type;
713 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
714 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
715 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
716 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
717 case RTE_ETH_FLOW_FRAG_IPV4:
718 ip = (struct ipv4_hdr *)raw_pkt;
720 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
721 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
722 /* set length to the default value */
723 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
724 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
725 fdir_input->flow.ip4_flow.proto :
726 next_proto[fdir_input->flow_type];
727 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
728 fdir_input->flow.ip4_flow.ttl :
729 I40E_FDIR_IP_DEFAULT_TTL;
730 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
732 * The source and destination fields in the transmitted packet
733 * need to be presented in a reversed order with respect
734 * to the expected received packets.
736 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
737 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
738 len += sizeof(struct ipv4_hdr);
740 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
741 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
742 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
743 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
744 case RTE_ETH_FLOW_FRAG_IPV6:
745 ip6 = (struct ipv6_hdr *)raw_pkt;
747 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
749 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
750 (fdir_input->flow.ipv6_flow.tc <<
751 I40E_FDIR_IPv6_TC_OFFSET));
753 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
754 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
755 fdir_input->flow.ipv6_flow.proto :
756 next_proto[fdir_input->flow_type];
757 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
758 fdir_input->flow.ipv6_flow.hop_limits :
759 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
761 * The source and destination fields in the transmitted packet
762 * need to be presented in a reversed order with respect
763 * to the expected received packets.
765 rte_memcpy(&(ip6->src_addr),
766 &(fdir_input->flow.ipv6_flow.dst_ip),
768 rte_memcpy(&(ip6->dst_addr),
769 &(fdir_input->flow.ipv6_flow.src_ip),
771 len += sizeof(struct ipv6_hdr);
774 PMD_DRV_LOG(ERR, "unknown flow type %u.",
775 fdir_input->flow_type);
783 * i40e_fdir_construct_pkt - construct packet based on fields in input
784 * @pf: board private structure
785 * @fdir_input: input set of the flow director entry
786 * @raw_pkt: a packet to be constructed
789 i40e_fdir_construct_pkt(struct i40e_pf *pf,
790 const struct rte_eth_fdir_input *fdir_input,
791 unsigned char *raw_pkt)
793 unsigned char *payload, *ptr;
796 struct sctp_hdr *sctp;
797 uint8_t size, dst = 0;
798 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
801 /* fill the ethernet and IP head */
802 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
803 !!fdir_input->flow_ext.vlan_tci);
807 /* fill the L4 head */
808 switch (fdir_input->flow_type) {
809 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
810 udp = (struct udp_hdr *)(raw_pkt + len);
811 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
813 * The source and destination fields in the transmitted packet
814 * need to be presented in a reversed order with respect
815 * to the expected received packets.
817 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
818 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
819 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
822 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
823 tcp = (struct tcp_hdr *)(raw_pkt + len);
824 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
826 * The source and destination fields in the transmitted packet
827 * need to be presented in a reversed order with respect
828 * to the expected received packets.
830 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
831 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
832 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
835 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
836 sctp = (struct sctp_hdr *)(raw_pkt + len);
837 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
839 * The source and destination fields in the transmitted packet
840 * need to be presented in a reversed order with respect
841 * to the expected received packets.
843 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
844 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
845 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
848 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
849 case RTE_ETH_FLOW_FRAG_IPV4:
850 payload = raw_pkt + len;
851 set_idx = I40E_FLXPLD_L3_IDX;
854 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
855 udp = (struct udp_hdr *)(raw_pkt + len);
856 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
858 * The source and destination fields in the transmitted packet
859 * need to be presented in a reversed order with respect
860 * to the expected received packets.
862 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
863 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
864 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
867 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
868 tcp = (struct tcp_hdr *)(raw_pkt + len);
869 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
871 * The source and destination fields in the transmitted packet
872 * need to be presented in a reversed order with respect
873 * to the expected received packets.
875 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
876 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
877 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
880 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
881 sctp = (struct sctp_hdr *)(raw_pkt + len);
882 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
884 * The source and destination fields in the transmitted packet
885 * need to be presented in a reversed order with respect
886 * to the expected received packets.
888 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
889 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
890 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
893 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
894 case RTE_ETH_FLOW_FRAG_IPV6:
895 payload = raw_pkt + len;
896 set_idx = I40E_FLXPLD_L3_IDX;
898 case RTE_ETH_FLOW_L2_PAYLOAD:
899 payload = raw_pkt + len;
901 * An ARP packet is a special case in which the payload
902 * starts after the whole ARP header
904 if (fdir_input->flow.l2_flow.ether_type ==
905 rte_cpu_to_be_16(ETHER_TYPE_ARP))
906 payload += sizeof(struct arp_hdr);
907 set_idx = I40E_FLXPLD_L2_IDX;
910 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
914 /* fill the flexbytes into the payload */
915 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
916 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
917 size = pf->fdir.flex_set[pit_idx].size;
920 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
922 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
924 &fdir_input->flow_ext.flexbytes[dst],
925 size * sizeof(uint16_t));
931 static struct i40e_customized_pctype *
932 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
934 struct i40e_customized_pctype *cus_pctype;
935 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
937 for (; i < I40E_CUSTOMIZED_MAX; i++) {
938 cus_pctype = &pf->customized_pctype[i];
939 if (pctype == cus_pctype->pctype)
946 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
947 const struct i40e_fdir_input *fdir_input,
948 unsigned char *raw_pkt,
951 struct i40e_customized_pctype *cus_pctype = NULL;
952 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
953 uint16_t *ether_type;
954 uint8_t len = 2 * sizeof(struct ether_addr);
956 struct ipv6_hdr *ip6;
957 uint8_t pctype = fdir_input->pctype;
958 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
959 static const uint8_t next_proto[] = {
960 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
961 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
962 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
963 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
964 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
965 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
966 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
967 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
968 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
969 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
972 raw_pkt += 2 * sizeof(struct ether_addr);
973 if (vlan && fdir_input->flow_ext.vlan_tci) {
974 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
975 rte_memcpy(raw_pkt + sizeof(uint16_t),
976 &fdir_input->flow_ext.vlan_tci,
978 raw_pkt += sizeof(vlan_frame);
979 len += sizeof(vlan_frame);
981 ether_type = (uint16_t *)raw_pkt;
982 raw_pkt += sizeof(uint16_t);
983 len += sizeof(uint16_t);
985 if (is_customized_pctype) {
986 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
988 PMD_DRV_LOG(ERR, "unknown pctype %u.",
994 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
995 *ether_type = fdir_input->flow.l2_flow.ether_type;
996 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
997 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
998 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
999 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1000 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1001 is_customized_pctype) {
1002 ip = (struct ipv4_hdr *)raw_pkt;
1004 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
1005 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1006 /* set length to the default value */
1007 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1008 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1009 fdir_input->flow.ip4_flow.ttl :
1010 I40E_FDIR_IP_DEFAULT_TTL;
1011 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
1013 * The source and destination fields in the transmitted packet
1014 * need to be presented in a reversed order with respect
1015 * to the expected received packets.
1017 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1018 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1020 if (!is_customized_pctype)
1021 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1022 fdir_input->flow.ip4_flow.proto :
1023 next_proto[fdir_input->pctype];
1024 else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1025 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1026 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1027 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
1028 ip->next_proto_id = IPPROTO_UDP;
1029 len += sizeof(struct ipv4_hdr);
1030 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1031 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1032 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1033 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1034 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1035 ip6 = (struct ipv6_hdr *)raw_pkt;
1037 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
1039 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1040 (fdir_input->flow.ipv6_flow.tc <<
1041 I40E_FDIR_IPv6_TC_OFFSET));
1043 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1044 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1045 fdir_input->flow.ipv6_flow.proto :
1046 next_proto[fdir_input->pctype];
1047 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1048 fdir_input->flow.ipv6_flow.hop_limits :
1049 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1051 * The source and destination fields in the transmitted packet
1052 * need to be presented in a reversed order with respect
1053 * to the expected received packets.
1055 rte_memcpy(&ip6->src_addr,
1056 &fdir_input->flow.ipv6_flow.dst_ip,
1058 rte_memcpy(&ip6->dst_addr,
1059 &fdir_input->flow.ipv6_flow.src_ip,
1061 len += sizeof(struct ipv6_hdr);
1063 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1064 fdir_input->pctype);
1072 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1073 * @pf: board private structure
1074 * @fdir_input: input set of the flow director entry
1075 * @raw_pkt: a packet to be constructed
1078 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1079 const struct i40e_fdir_input *fdir_input,
1080 unsigned char *raw_pkt)
1082 unsigned char *payload = NULL;
1084 struct udp_hdr *udp;
1085 struct tcp_hdr *tcp;
1086 struct sctp_hdr *sctp;
1087 struct rte_flow_item_gtp *gtp;
1088 struct ipv4_hdr *gtp_ipv4;
1089 struct ipv6_hdr *gtp_ipv6;
1090 uint8_t size, dst = 0;
1091 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1093 uint8_t pctype = fdir_input->pctype;
1094 struct i40e_customized_pctype *cus_pctype;
1096 /* raw packet template - just copy the contents of the raw packet */
1097 if (fdir_input->flow_ext.pkt_template) {
1098 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1099 fdir_input->flow.raw_flow.length);
1103 /* fill the ethernet and IP head */
1104 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1105 !!fdir_input->flow_ext.vlan_tci);
1109 /* fill the L4 head */
1110 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1111 udp = (struct udp_hdr *)(raw_pkt + len);
1112 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1114 * The source and destination fields in the transmitted packet
1115 * need to be presented in a reversed order with respect
1116 * to the expected received packets.
1118 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1119 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1120 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1121 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1122 tcp = (struct tcp_hdr *)(raw_pkt + len);
1123 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1125 * The source and destination fields in the transmitted packet
1126 * need to be presented in a reversed order with respect
1127 * to the expected received packets.
1129 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1130 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1131 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1132 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1133 sctp = (struct sctp_hdr *)(raw_pkt + len);
1134 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1136 * The source and destination fields in the transmitted packet
1137 * need to be presented in a reversed order with respect
1138 * to the expected received packets.
1140 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1141 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1142 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1143 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1144 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1145 payload = raw_pkt + len;
1146 set_idx = I40E_FLXPLD_L3_IDX;
1147 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1148 udp = (struct udp_hdr *)(raw_pkt + len);
1149 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1151 * The source and destination fields in the transmitted packet
1152 * need to be presented in a reversed order with respect
1153 * to the expected received packets.
1155 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1156 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1157 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1158 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1159 tcp = (struct tcp_hdr *)(raw_pkt + len);
1160 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1162 * The source and destination fields in the transmitted packet
1163 * need to be presented in a reversed order with respect
1164 * to the expected received packets.
1166 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1167 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1168 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1169 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1170 sctp = (struct sctp_hdr *)(raw_pkt + len);
1171 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1173 * The source and destination fields in the transmitted packet
1174 * need to be presented in a reversed order with respect
1175 * to the expected received packets.
1177 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1178 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1179 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1180 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1181 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1182 payload = raw_pkt + len;
1183 set_idx = I40E_FLXPLD_L3_IDX;
1184 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1185 payload = raw_pkt + len;
1187 * An ARP packet is a special case in which the payload
1188 * starts after the whole ARP header
1190 if (fdir_input->flow.l2_flow.ether_type ==
1191 rte_cpu_to_be_16(ETHER_TYPE_ARP))
1192 payload += sizeof(struct arp_hdr);
1193 set_idx = I40E_FLXPLD_L2_IDX;
1194 } else if (fdir_input->flow_ext.customized_pctype) {
1195 /* If customized pctype is used */
1196 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1197 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1198 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1199 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1200 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1201 udp = (struct udp_hdr *)(raw_pkt + len);
1203 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1205 gtp = (struct rte_flow_item_gtp *)
1206 ((unsigned char *)udp + sizeof(struct udp_hdr));
1208 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1209 gtp->teid = fdir_input->flow.gtp_flow.teid;
1210 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
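/*
 * Header defaults used here (an interpretation of the constants, not taken
 * from this code): message type 0x01 is the GTP echo request; the version/
 * flags byte set below is 0x32 (GTPv1, sequence number present) for GTP-C
 * and 0x30 (GTPv1, no optional fields) for GTP-U. Templates with an inner
 * IP header override the message type with 0xFF (G-PDU) further down.
 */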
1212 /* GTP-C message type is not supported. */
1213 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1215 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1216 gtp->v_pt_rsv_flags =
1217 I40E_FDIR_GTP_VER_FLAG_0X32;
1220 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1221 gtp->v_pt_rsv_flags =
1222 I40E_FDIR_GTP_VER_FLAG_0X30;
1225 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1226 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1227 gtp_ipv4 = (struct ipv4_hdr *)
1228 ((unsigned char *)gtp +
1229 sizeof(struct rte_flow_item_gtp));
1230 gtp_ipv4->version_ihl =
1231 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1232 gtp_ipv4->next_proto_id = IPPROTO_IP;
1233 gtp_ipv4->total_length =
1235 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1236 payload = (unsigned char *)gtp_ipv4 +
1237 sizeof(struct ipv4_hdr);
1238 } else if (cus_pctype->index ==
1239 I40E_CUSTOMIZED_GTPU_IPV6) {
1240 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1241 gtp_ipv6 = (struct ipv6_hdr *)
1242 ((unsigned char *)gtp +
1243 sizeof(struct rte_flow_item_gtp));
1244 gtp_ipv6->vtc_flow =
1246 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1247 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1248 gtp_ipv6->proto = IPPROTO_NONE;
1249 gtp_ipv6->payload_len =
1251 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1252 gtp_ipv6->hop_limits =
1253 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1254 payload = (unsigned char *)gtp_ipv6 +
1255 sizeof(struct ipv6_hdr);
1257 payload = (unsigned char *)gtp +
1258 sizeof(struct rte_flow_item_gtp);
1261 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1262 fdir_input->pctype);
1266 /* fill the flexbytes into the payload */
1267 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1268 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1269 size = pf->fdir.flex_set[pit_idx].size;
1272 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1274 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1275 (void)rte_memcpy(ptr,
1276 &fdir_input->flow_ext.flexbytes[dst],
1277 size * sizeof(uint16_t));
1283 /* Construct the tx flags */
1284 static inline uint64_t
1285 i40e_build_ctob(uint32_t td_cmd,
1290 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1291 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1292 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1293 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1294 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
1298 * Check the programming status descriptor in the rx queue.
1299 * Done after a Flow Director filter has been programmed.
1303 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1305 volatile union i40e_rx_desc *rxdp;
1312 rxdp = &rxq->rx_ring[rxq->rx_tail];
1313 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1314 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1315 >> I40E_RXD_QW1_STATUS_SHIFT;
1317 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1318 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1319 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1320 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1322 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1323 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1325 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1326 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1327 if (error == (0x1 <<
1328 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1329 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1330 " (FD_ID %u): programming status"
1332 rxdp->wb.qword0.hi_dword.fd_id);
1334 } else if (error == (0x1 <<
1335 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1336 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1337 " (FD_ID %u): programming status"
1339 rxdp->wb.qword0.hi_dword.fd_id);
1342 PMD_DRV_LOG(ERR, "invalid programming status"
1343 " reported, error = %u.", error);
1345 PMD_DRV_LOG(INFO, "unknown programming status"
1346 " reported, len = %d, id = %u.", len, id);
1347 rxdp->wb.qword1.status_error_len = 0;
1349 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1351 if (rxq->rx_tail == 0)
1352 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1354 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1361 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1362 struct i40e_fdir_filter *filter)
1364 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
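/*
 * For raw packet templates the packet contents are not duplicated here;
 * instead a CRC32 of the packet (seeded with the pctype) is stored in the
 * length field and later used as the pre-computed signature for the
 * rte_hash *_with_hash() lookups below.
 */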
1365 if (input->input.flow_ext.pkt_template) {
1366 filter->fdir.input.flow.raw_flow.packet = NULL;
1367 filter->fdir.input.flow.raw_flow.length =
1368 rte_hash_crc(input->input.flow.raw_flow.packet,
1369 input->input.flow.raw_flow.length,
1370 input->input.flow.raw_flow.pctype);
1375 /* Check whether the flow director filter already exists */
1376 static struct i40e_fdir_filter *
1377 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1378 const struct i40e_fdir_input *input)
1382 if (input->flow_ext.pkt_template)
1383 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1384 (const void *)input,
1385 input->flow.raw_flow.length);
1387 ret = rte_hash_lookup(fdir_info->hash_table,
1388 (const void *)input);
1392 return fdir_info->hash_map[ret];
1395 /* Add a flow director filter into the SW list */
1397 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1399 struct i40e_fdir_info *fdir_info = &pf->fdir;
1402 if (filter->fdir.input.flow_ext.pkt_template)
1403 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1404 &filter->fdir.input,
1405 filter->fdir.input.flow.raw_flow.length);
1407 ret = rte_hash_add_key(fdir_info->hash_table,
1408 &filter->fdir.input);
1411 "Failed to insert fdir filter to hash table %d!",
1415 fdir_info->hash_map[ret] = filter;
1417 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1422 /* Delete a flow director filter from the SW list */
1424 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1426 struct i40e_fdir_info *fdir_info = &pf->fdir;
1427 struct i40e_fdir_filter *filter;
1430 if (input->flow_ext.pkt_template)
1431 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1433 input->flow.raw_flow.length);
1435 ret = rte_hash_del_key(fdir_info->hash_table, input);
1438 "Failed to delete fdir filter to hash table %d!",
1442 filter = fdir_info->hash_map[ret];
1443 fdir_info->hash_map[ret] = NULL;
1445 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1452 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1453 * @pf: board private structure
1454 * @filter: fdir filter entry
1455 * @add: 0 - delete, 1 - add
1458 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1459 const struct rte_eth_fdir_filter *filter,
1462 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1463 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1464 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1465 enum i40e_filter_pctype pctype;
1468 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1469 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1470 " check the mode in fdir_conf.");
1474 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1475 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1476 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1479 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1480 PMD_DRV_LOG(ERR, "Invalid queue ID");
1483 if (filter->input.flow_ext.is_vf &&
1484 filter->input.flow_ext.dst_id >= pf->vf_num) {
1485 PMD_DRV_LOG(ERR, "Invalid VF ID");
1489 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1491 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1493 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1497 if (hw->mac.type == I40E_MAC_X722) {
1498 /* get translated pctype value in fd pctype register */
1499 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1500 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1503 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1505 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1514 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1515 * @pf: board private structure
1516 * @filter: fdir filter entry
1517 * @add: 0 - delete, 1 - add
1520 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1521 const struct i40e_fdir_filter_conf *filter,
1524 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1525 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1526 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1527 enum i40e_filter_pctype pctype;
1528 struct i40e_fdir_info *fdir_info = &pf->fdir;
1529 struct i40e_fdir_filter *fdir_filter, *node;
1530 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1533 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1534 PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
1538 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1539 PMD_DRV_LOG(ERR, "Invalid queue ID");
1542 if (filter->input.flow_ext.is_vf &&
1543 filter->input.flow_ext.dst_id >= pf->vf_num) {
1544 PMD_DRV_LOG(ERR, "Invalid VF ID");
1547 if (filter->input.flow_ext.pkt_template) {
1548 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1549 !filter->input.flow.raw_flow.packet) {
1550 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1551 " flow filter parameters!");
1554 pctype = filter->input.flow.raw_flow.pctype;
1556 pctype = filter->input.pctype;
1559 /* Check whether the filter exists in the SW list */
1560 memset(&check_filter, 0, sizeof(check_filter));
1561 i40e_fdir_filter_convert(filter, &check_filter);
1562 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1565 "Conflict with existing flow director rules!");
1569 if (!add && !node) {
1571 "There's no corresponding flow firector filter!");
1575 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1577 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1579 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1583 if (hw->mac.type == I40E_MAC_X722) {
1584 /* get translated pctype value in fd pctype register */
1585 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1586 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1589 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1591 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1597 fdir_filter = rte_zmalloc("fdir_filter",
1598 sizeof(*fdir_filter), 0);
1599 if (fdir_filter == NULL) {
1600 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
1604 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1605 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1607 rte_free(fdir_filter);
1609 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1616 * i40e_fdir_filter_programming - Program a flow director filter rule.
1617 * This is done with a Flow Director Programming Descriptor followed by a
1618 * packet structure that contains the filter fields that need to match.
1619 * @pf: board private structure
1621 * @filter: fdir filter entry
1622 * @add: 0 - delete, 1 - add
1625 i40e_fdir_filter_programming(struct i40e_pf *pf,
1626 enum i40e_filter_pctype pctype,
1627 const struct rte_eth_fdir_filter *filter,
1630 struct i40e_tx_queue *txq = pf->fdir.txq;
1631 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1632 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1633 volatile struct i40e_tx_desc *txdp;
1634 volatile struct i40e_filter_program_desc *fdirdp;
1639 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1640 fdirdp = (volatile struct i40e_filter_program_desc *)
1641 (&(txq->tx_ring[txq->tx_tail]));
1643 fdirdp->qindex_flex_ptype_vsi =
1644 rte_cpu_to_le_32((fdir_action->rx_queue <<
1645 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1646 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1648 fdirdp->qindex_flex_ptype_vsi |=
1649 rte_cpu_to_le_32((fdir_action->flex_off <<
1650 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1651 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1653 fdirdp->qindex_flex_ptype_vsi |=
1654 rte_cpu_to_le_32((pctype <<
1655 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1656 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1658 if (filter->input.flow_ext.is_vf)
1659 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1661 /* Use LAN VSI Id by default */
1662 vsi_id = pf->main_vsi->vsi_id;
1663 fdirdp->qindex_flex_ptype_vsi |=
1664 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1665 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1666 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1668 fdirdp->dtype_cmd_cntindex =
1669 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1672 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1673 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1674 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1676 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1677 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1678 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1680 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1681 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1682 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1683 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1684 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1685 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1687 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1688 " unsupported fdir behavior.");
1692 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1693 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1694 I40E_TXD_FLTR_QW1_DEST_MASK);
1696 fdirdp->dtype_cmd_cntindex |=
1697 rte_cpu_to_le_32((fdir_action->report_status<<
1698 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1699 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1701 fdirdp->dtype_cmd_cntindex |=
1702 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1703 fdirdp->dtype_cmd_cntindex |=
1705 ((uint32_t)pf->fdir.match_counter_index <<
1706 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1707 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1709 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1711 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1712 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1713 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1714 td_cmd = I40E_TX_DESC_CMD_EOP |
1715 I40E_TX_DESC_CMD_RS |
1716 I40E_TX_DESC_CMD_DUMMY;
1718 txdp->cmd_type_offset_bsz =
1719 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1721 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1722 if (txq->tx_tail >= txq->nb_tx_desc)
1724 /* Update the tx tail register */
1726 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
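/*
 * Wait for the data descriptor to be marked done (DD) by hardware, then
 * check the programming status reported back on the FDIR rx queue.
 */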
1727 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1728 if ((txdp->cmd_type_offset_bsz &
1729 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1730 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1734 if (i >= I40E_FDIR_MAX_WAIT_US) {
1735 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1736 " time out to get DD on tx queue.");
1739 /* delay up to 10 ms in total while checking the programming status */
1740 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1741 if (i40e_check_fdir_programming_status(rxq) >= 0)
1746 "Failed to program FDIR filter: programming status reported.");
1751 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1752 * This is done with a Flow Director Programming Descriptor followed by a
1753 * packet structure that contains the filter fields that need to match.
1754 * @pf: board private structure
1756 * @filter: fdir filter entry
1757 * @add: 0 - delete, 1 - add
1760 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1761 enum i40e_filter_pctype pctype,
1762 const struct i40e_fdir_filter_conf *filter,
1765 struct i40e_tx_queue *txq = pf->fdir.txq;
1766 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1767 const struct i40e_fdir_action *fdir_action = &filter->action;
1768 volatile struct i40e_tx_desc *txdp;
1769 volatile struct i40e_filter_program_desc *fdirdp;
1774 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1775 fdirdp = (volatile struct i40e_filter_program_desc *)
1776 (&txq->tx_ring[txq->tx_tail]);
1778 fdirdp->qindex_flex_ptype_vsi =
1779 rte_cpu_to_le_32((fdir_action->rx_queue <<
1780 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1781 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1783 fdirdp->qindex_flex_ptype_vsi |=
1784 rte_cpu_to_le_32((fdir_action->flex_off <<
1785 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1786 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1788 fdirdp->qindex_flex_ptype_vsi |=
1789 rte_cpu_to_le_32((pctype <<
1790 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1791 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1793 if (filter->input.flow_ext.is_vf)
1794 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1796 /* Use LAN VSI Id by default */
1797 vsi_id = pf->main_vsi->vsi_id;
1798 fdirdp->qindex_flex_ptype_vsi |=
1799 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1800 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1801 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1803 fdirdp->dtype_cmd_cntindex =
1804 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1807 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1808 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1809 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1811 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1812 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1813 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1815 if (fdir_action->behavior == I40E_FDIR_REJECT)
1816 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1817 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1818 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1819 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1820 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1822 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1826 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1827 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1828 I40E_TXD_FLTR_QW1_DEST_MASK);
1830 fdirdp->dtype_cmd_cntindex |=
1831 rte_cpu_to_le_32((fdir_action->report_status <<
1832 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1833 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1835 fdirdp->dtype_cmd_cntindex |=
1836 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1837 fdirdp->dtype_cmd_cntindex |=
1839 ((uint32_t)pf->fdir.match_counter_index <<
1840 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1841 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1843 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1845 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1846 txdp = &txq->tx_ring[txq->tx_tail + 1];
1847 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1848 td_cmd = I40E_TX_DESC_CMD_EOP |
1849 I40E_TX_DESC_CMD_RS |
1850 I40E_TX_DESC_CMD_DUMMY;
1852 txdp->cmd_type_offset_bsz =
1853 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1855 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1856 if (txq->tx_tail >= txq->nb_tx_desc)
1858 /* Update the tx tail register */
1860 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1861 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1862 if ((txdp->cmd_type_offset_bsz &
1863 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1864 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1868 if (i >= I40E_FDIR_MAX_WAIT_US) {
1870 "Failed to program FDIR filter: time out to get DD on tx queue.");
1873 /* delay up to 10 ms in total while checking the programming status */
1874 rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1875 if (i40e_check_fdir_programming_status(rxq) < 0) {
1877 "Failed to program FDIR filter: programming status reported.");
1885 * i40e_fdir_flush - clear all filters of Flow Director table
1886 * @pf: board private structure
1889 i40e_fdir_flush(struct rte_eth_dev *dev)
1891 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1892 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1894 uint16_t guarant_cnt, best_cnt;
1897 I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1898 I40E_WRITE_FLUSH(hw);
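/* Poll until hardware clears the CLEARFDTABLE bit, signalling that the flush completed. */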
1900 for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1901 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1902 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1903 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1906 if (i >= I40E_FDIR_FLUSH_RETRY) {
1907 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1910 guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1911 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1912 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1913 best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1914 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
1915 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
1916 if (guarant_cnt != 0 || best_cnt != 0) {
1917 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
1920 PMD_DRV_LOG(INFO, "FD table Flush success.");
1925 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
1926 struct rte_eth_flex_payload_cfg *flex_set,
1929 struct i40e_fdir_flex_pit *flex_pit;
1930 struct rte_eth_flex_payload_cfg *ptr = flex_set;
1931 uint16_t src, dst, size, j, k;
1932 uint8_t i, layer_idx;
1934 for (layer_idx = I40E_FLXPLD_L2_IDX;
1935 layer_idx <= I40E_FLXPLD_L4_IDX;
1937 if (layer_idx == I40E_FLXPLD_L2_IDX)
1938 ptr->type = RTE_ETH_L2_PAYLOAD;
1939 else if (layer_idx == I40E_FLXPLD_L3_IDX)
1940 ptr->type = RTE_ETH_L3_PAYLOAD;
1941 else if (layer_idx == I40E_FLXPLD_L4_IDX)
1942 ptr->type = RTE_ETH_L4_PAYLOAD;
1944 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1945 flex_pit = &pf->fdir.flex_set[layer_idx *
1946 I40E_MAX_FLXPLD_FIED + i];
1947 if (flex_pit->size == 0)
1949 src = flex_pit->src_offset * sizeof(uint16_t);
1950 dst = flex_pit->dst_offset * sizeof(uint16_t);
1951 size = flex_pit->size * sizeof(uint16_t);
1952 for (j = src, k = dst; j < src + size; j++, k++)
1953 ptr->src_offset[k] = j;
1961 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
1962 struct rte_eth_fdir_flex_mask *flex_mask,
1965 struct i40e_fdir_flex_mask *mask;
1966 struct rte_eth_fdir_flex_mask *ptr = flex_mask;
1969 uint16_t off_bytes, mask_tmp;
1971 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1972 i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
1974 mask = &pf->fdir.flex_mask[i];
1975 flow_type = i40e_pctype_to_flowtype(pf->adapter,
1976 (enum i40e_filter_pctype)i);
1977 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
1980 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
1981 if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
1982 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
1983 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
1985 ptr->mask[j * sizeof(uint16_t)] = 0x0;
1986 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
1989 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
1990 off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
1991 mask_tmp = ~mask->bitmask[j].mask;
1992 ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
1993 ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
1995 ptr->flow_type = flow_type;
2002 * i40e_fdir_info_get - get information of Flow Director
2003 * @pf: ethernet device to get info from
2004 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2005 * the flow director information.
2008 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2010 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2011 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2012 uint16_t num_flex_set = 0;
2013 uint16_t num_flex_mask = 0;
2016 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2017 fdir->mode = RTE_FDIR_MODE_PERFECT;
2019 fdir->mode = RTE_FDIR_MODE_NONE;
2022 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2024 (uint32_t)hw->func_caps.fd_filters_best_effort;
2025 fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2026 fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2027 for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2028 fdir->flow_types_mask[i] = 0ULL;
2029 fdir->flex_payload_unit = sizeof(uint16_t);
2030 fdir->flex_bitmask_unit = sizeof(uint16_t);
2031 fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2032 fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2033 fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2035 i40e_fdir_info_get_flex_set(pf,
2036 fdir->flex_conf.flex_set,
2038 i40e_fdir_info_get_flex_mask(pf,
2039 fdir->flex_conf.flex_mask,
2042 fdir->flex_conf.nb_payloads = num_flex_set;
2043 fdir->flex_conf.nb_flexmasks = num_flex_mask;
2047 * i40e_fdir_stat_get - get statistics of Flow Director
2048 * @pf: ethernet device to get info from
2049 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2050 * the flow director statistics.
2053 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2055 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2056 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2059 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2061 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2062 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2064 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2065 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2069 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2070 struct rte_eth_fdir_filter_info *info)
2072 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2076 PMD_DRV_LOG(ERR, "Invalid pointer");
2080 switch (info->info_type) {
2081 case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2082 ret = i40e_fdir_filter_inset_select(pf,
2083 &(info->info.input_set_conf));
2086 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2095 * i40e_fdir_ctrl_func - deal with all operations on flow director.
2096 * @pf: board private structure
2097 * @filter_op: the operation to be taken
2098 * @arg: a pointer to specific structure corresponding to the filter_op
2101 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2102 enum rte_filter_op filter_op,
2105 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2108 if ((pf->flags & I40E_FLAG_FDIR) == 0)
2111 if (filter_op == RTE_ETH_FILTER_NOP)
2114 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2117 switch (filter_op) {
2118 case RTE_ETH_FILTER_ADD:
2119 ret = i40e_add_del_fdir_filter(dev,
2120 (struct rte_eth_fdir_filter *)arg,
2123 case RTE_ETH_FILTER_DELETE:
2124 ret = i40e_add_del_fdir_filter(dev,
2125 (struct rte_eth_fdir_filter *)arg,
2128 case RTE_ETH_FILTER_FLUSH:
2129 ret = i40e_fdir_flush(dev);
2131 case RTE_ETH_FILTER_INFO:
2132 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2134 case RTE_ETH_FILTER_SET:
2135 ret = i40e_fdir_filter_set(dev,
2136 (struct rte_eth_fdir_filter_info *)arg);
2138 case RTE_ETH_FILTER_STATS:
2139 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2142 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2149 /* Restore flow director filter */
2151 i40e_fdir_filter_restore(struct i40e_pf *pf)
2153 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2154 struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2155 struct i40e_fdir_filter *f;
2156 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2158 uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
2159 uint32_t best_cnt; /**< Number of filters in best effort spaces. */
2161 TAILQ_FOREACH(f, fdir_list, rules)
2162 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2164 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2166 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2167 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2169 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2170 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2172 PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
2173 guarant_cnt, best_cnt);