1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
25 #include "i40e_logs.h"
26 #include "base/i40e_type.h"
27 #include "base/i40e_prototype.h"
28 #include "i40e_ethdev.h"
29 #include "i40e_rxtx.h"
31 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
33 #define IPV6_ADDR_LEN 16
36 #define I40E_FDIR_PKT_LEN 512
37 #define I40E_FDIR_IP_DEFAULT_LEN 420
38 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
39 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
40 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
41 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
43 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
44 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
45 #define I40E_FDIR_UDP_DEFAULT_LEN 400
46 #define I40E_FDIR_GTP_DEFAULT_LEN 384
47 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
48 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
50 #define I40E_FDIR_GTPC_DST_PORT 2123
51 #define I40E_FDIR_GTPU_DST_PORT 2152
52 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
53 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
54 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
55 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
57 /* Wait time for fdir filter programming */
58 #define I40E_FDIR_MAX_WAIT_US 10000
60 /* Wait count and interval for fdir filter flush */
61 #define I40E_FDIR_FLUSH_RETRY 50
62 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
64 #define I40E_COUNTER_PF 2
65 /* Statistics counter index for one PF */
66 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
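/* e.g. with I40E_COUNTER_PF == 2, PF 0 uses counter 0 and PF 1 uses counter 2 */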
68 #define I40E_FDIR_FLOWS ( \
69 (1 << RTE_ETH_FLOW_FRAG_IPV4) | \
70 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
71 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
72 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
73 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
74 (1 << RTE_ETH_FLOW_FRAG_IPV6) | \
75 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
76 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
77 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
78 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
79 (1 << RTE_ETH_FLOW_L2_PAYLOAD))
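/* This flow-type bitmap is what i40e_fdir_info_get() reports in flow_types_mask[0]. */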
81 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
82 enum i40e_filter_pctype pctype,
83 const struct rte_eth_fdir_filter *filter,
85 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
86 struct i40e_fdir_filter *filter);
87 static struct i40e_fdir_filter *
88 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
89 const struct i40e_fdir_input *input);
90 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
91 struct i40e_fdir_filter *filter);
93 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
94 enum i40e_filter_pctype pctype,
95 const struct i40e_fdir_filter_conf *filter,
99 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
101 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
102 struct i40e_hmc_obj_rxq rx_ctx;
103 int err = I40E_SUCCESS;
105 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
106 /* Init the RX queue in hardware */
107 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
109 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
110 rx_ctx.qlen = rxq->nb_rx_desc;
111 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
114 rx_ctx.dtype = i40e_header_split_none;
115 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
116 rx_ctx.rxmax = ETHER_MAX_LEN;
117 rx_ctx.tphrdesc_ena = 1;
118 rx_ctx.tphwdesc_ena = 1;
119 rx_ctx.tphdata_ena = 1;
120 rx_ctx.tphhead_ena = 1;
121 rx_ctx.lrxqthresh = 2;
127 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
128 if (err != I40E_SUCCESS) {
129 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
132 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
133 if (err != I40E_SUCCESS) {
134 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
137 rxq->qrx_tail = hw->hw_addr +
138 I40E_QRX_TAIL(rxq->vsi->base_queue);
141 /* Init the RX tail register. */
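/* Writing 0 and then nb_rx_desc - 1 makes the whole ring available to HW for
 * Flow Director programming status write-backs.
 */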
142 I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
143 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
149 * i40e_fdir_setup - reserve and initialize the Flow Director resources
150 * @pf: board private structure
153 i40e_fdir_setup(struct i40e_pf *pf)
155 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
156 struct i40e_vsi *vsi;
157 int err = I40E_SUCCESS;
158 char z_name[RTE_MEMZONE_NAMESIZE];
159 const struct rte_memzone *mz = NULL;
160 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
162 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
163 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
164 return I40E_NOT_SUPPORTED;
167 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
168 " num_filters_best_effort = %u.",
169 hw->func_caps.fd_filters_guaranteed,
170 hw->func_caps.fd_filters_best_effort);
172 vsi = pf->fdir.fdir_vsi;
174 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
177 /* make new FDIR VSI */
178 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
180 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
181 return I40E_ERR_NO_AVAILABLE_VSI;
183 pf->fdir.fdir_vsi = vsi;
185 /* FDIR TX queue setup */
186 err = i40e_fdir_setup_tx_resources(pf);
188 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
192 /* FDIR RX queue setup */
193 err = i40e_fdir_setup_rx_resources(pf);
195 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
199 err = i40e_tx_queue_init(pf->fdir.txq);
201 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
205 /* needs to be switched on before dev start */
206 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
208 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
212 /* Init the rx queue in hardware */
213 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
215 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
219 /* switch on rx queue */
220 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
222 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
226 /* reserve memory for the fdir programming packet */
227 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
228 eth_dev->device->driver->name,
230 eth_dev->data->port_id);
231 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
233 PMD_DRV_LOG(ERR, "Cannot init memzone for "
234 "flow director program packet.");
235 err = I40E_ERR_NO_MEMORY;
238 pf->fdir.prg_pkt = mz->addr;
239 pf->fdir.dma_addr = mz->iova;
241 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
242 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
247 i40e_dev_rx_queue_release(pf->fdir.rxq);
250 i40e_dev_tx_queue_release(pf->fdir.txq);
253 i40e_vsi_release(vsi);
254 pf->fdir.fdir_vsi = NULL;
259 * i40e_fdir_teardown - release the Flow Director resources
260 * @pf: board private structure
263 i40e_fdir_teardown(struct i40e_pf *pf)
265 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
266 struct i40e_vsi *vsi;
268 vsi = pf->fdir.fdir_vsi;
271 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
273 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
274 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
276 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
277 i40e_dev_rx_queue_release(pf->fdir.rxq);
279 i40e_dev_tx_queue_release(pf->fdir.txq);
281 i40e_vsi_release(vsi);
282 pf->fdir.fdir_vsi = NULL;
285 /* check whether the flow director table is empty */
287 i40e_fdir_empty(struct i40e_hw *hw)
289 uint32_t guarant_cnt, best_cnt;
291 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
292 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
293 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
294 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
295 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
296 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
297 if (best_cnt + guarant_cnt > 0)
304 * Initialize the configuration of the byte stream extracted as flexible payload
308 i40e_init_flx_pld(struct i40e_pf *pf)
310 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
316 * Define the byte stream extracted as flexible payload in the
317 * field vector. By default, select 8 words from the beginning
318 * of the payload as flexible payload.
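* (8 words of 2 bytes each, i.e. the first 16 bytes of the payload)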
320 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
321 index = i * I40E_MAX_FLXPLD_FIED;
322 pf->fdir.flex_set[index].src_offset = 0;
323 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
324 pf->fdir.flex_set[index].dst_offset = 0;
325 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
327 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* unused */
329 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* unused */
332 /* initialize the masks */
333 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
334 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
335 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
337 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
339 pf->fdir.flex_mask[pctype].word_mask = 0;
340 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
341 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
342 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
343 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
344 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
349 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
350 if ((flex_pit2).src_offset < \
351 (flex_pit1).src_offset + (flex_pit1).size) { \
352 PMD_DRV_LOG(ERR, "src_offset should not be" \
353 " less than the previous offset" \
354 " + previous FSIZE."); \
360 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit entries,
361 * which are kept sorted by their src_offset values
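* Each run of consecutive source offsets becomes one entry: src_offset is the
* first offset of the run, size is the run length and dst_offset is the index
* of the run's first byte in the flex bytes array, e.g. a run {4, 5, 6} placed
* at flex bytes 0-2 becomes {src_offset 4, size 3, dst_offset 0}.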
363 static inline uint16_t
364 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
365 struct i40e_fdir_flex_pit *flex_pit)
367 uint16_t src_tmp, size, num = 0;
368 uint16_t i, k, j = 0;
370 while (j < I40E_FDIR_MAX_FLEX_LEN) {
372 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
373 if (src_offset[j + 1] == src_offset[j] + 1)
378 src_tmp = src_offset[j] + 1 - size;
379 /* the flex_pit entries need to be sorted by src_offset */
380 for (i = 0; i < num; i++) {
381 if (src_tmp < flex_pit[i].src_offset)
384 /* if insertion is required, move later entries backward */
385 for (k = num; k > i; k--)
386 flex_pit[k] = flex_pit[k - 1];
388 flex_pit[i].dst_offset = j + 1 - size;
389 flex_pit[i].src_offset = src_tmp;
390 flex_pit[i].size = size;
397 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
399 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
401 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
404 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
405 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
406 PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
411 memset(flex_pit, 0, sizeof(flex_pit));
412 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
413 if (num > I40E_MAX_FLXPLD_FIED) {
414 PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
417 for (i = 0; i < num; i++) {
418 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
419 flex_pit[i].src_offset & 0x01) {
420 PMD_DRV_LOG(ERR, "flexpayload should be measured"
425 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
431 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
432 * arguments are valid
435 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
436 const struct rte_eth_fdir_flex_conf *conf)
438 const struct rte_eth_flex_payload_cfg *flex_cfg;
439 const struct rte_eth_fdir_flex_mask *flex_mask;
444 enum i40e_filter_pctype pctype;
447 PMD_DRV_LOG(INFO, "NULL pointer.");
450 /* check flexible payload setting configuration */
451 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
452 PMD_DRV_LOG(ERR, "invalid number of payload settings.");
455 for (i = 0; i < conf->nb_payloads; i++) {
456 flex_cfg = &conf->flex_set[i];
457 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
458 PMD_DRV_LOG(ERR, "invalid payload type.");
461 ret = i40e_check_fdir_flex_payload(flex_cfg);
463 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
468 /* check flex mask setting configuration */
469 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
470 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
473 for (i = 0; i < conf->nb_flexmasks; i++) {
474 flex_mask = &conf->flex_mask[i];
475 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
476 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
477 PMD_DRV_LOG(WARNING, "invalid flow type.");
481 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
482 mask_tmp = I40E_WORD(flex_mask->mask[j],
483 flex_mask->mask[j + 1]);
484 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
486 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
487 PMD_DRV_LOG(ERR, "exceeds maximal"
488 " number of bitmasks.");
498 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
499 * @pf: board private structure
500 * @cfg: the rule defining how the byte stream is extracted as flexible payload
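* Offsets and sizes are given in bytes and converted to 2-byte words before
* being written to the PRTQF_FLX_PIT registers.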
503 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
504 const struct rte_eth_flex_payload_cfg *cfg)
506 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
507 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
509 uint16_t num, min_next_off; /* in words */
510 uint8_t field_idx = 0;
511 uint8_t layer_idx = 0;
514 if (cfg->type == RTE_ETH_L2_PAYLOAD)
515 layer_idx = I40E_FLXPLD_L2_IDX;
516 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
517 layer_idx = I40E_FLXPLD_L3_IDX;
518 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
519 layer_idx = I40E_FLXPLD_L4_IDX;
521 memset(flex_pit, 0, sizeof(flex_pit));
522 num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
524 for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
525 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
526 /* record the info in fdir structure */
527 pf->fdir.flex_set[field_idx].src_offset =
528 flex_pit[i].src_offset / sizeof(uint16_t);
529 pf->fdir.flex_set[field_idx].size =
530 flex_pit[i].size / sizeof(uint16_t);
531 pf->fdir.flex_set[field_idx].dst_offset =
532 flex_pit[i].dst_offset / sizeof(uint16_t);
533 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
534 pf->fdir.flex_set[field_idx].size,
535 pf->fdir.flex_set[field_idx].dst_offset);
537 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
539 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
540 pf->fdir.flex_set[field_idx].size;
542 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
543 /* set the unused registers, obeying the register constraints */
544 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
545 NONUSE_FLX_PIT_DEST_OFF);
547 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
554 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
555 * @pf: board private structure
556 * @pctype: packet classification type
557 * @flex_masks: mask for flexible payload
560 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
561 enum i40e_filter_pctype pctype,
562 const struct rte_eth_fdir_flex_mask *mask_cfg)
564 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
565 struct i40e_fdir_flex_mask *flex_mask;
566 uint32_t flxinset, fd_mask;
568 uint8_t i, nb_bitmask = 0;
570 flex_mask = &pf->fdir.flex_mask[pctype];
571 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
572 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
573 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
574 if (mask_tmp != 0x0) {
575 flex_mask->word_mask |=
576 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
577 if (mask_tmp != UINT16_MAX) {
579 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
580 flex_mask->bitmask[nb_bitmask].offset =
581 i / sizeof(uint16_t);
586 /* write mask to hw */
587 flxinset = (flex_mask->word_mask <<
588 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
589 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
590 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
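/* Program each partially-masked flex word into a PRTQF_FD_MSK register:
 * the stored bit mask plus the word's offset within the field vector.
 */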
592 for (i = 0; i < nb_bitmask; i++) {
593 fd_mask = (flex_mask->bitmask[i].mask <<
594 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
595 I40E_PRTQF_FD_MSK_MASK_MASK;
596 fd_mask |= ((flex_mask->bitmask[i].offset +
597 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
598 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
599 I40E_PRTQF_FD_MSK_OFFSET_MASK;
600 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
605 * Configure flow director related settings
608 i40e_fdir_configure(struct rte_eth_dev *dev)
610 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
611 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
612 struct rte_eth_fdir_flex_conf *conf;
613 enum i40e_filter_pctype pctype;
619 * configuration needs to be done before
620 * flow director filters are added.
621 * If filters exist, flush them.
623 if (i40e_fdir_empty(hw) < 0) {
624 ret = i40e_fdir_flush(dev);
626 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
631 /* enable FDIR filter */
632 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
633 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
634 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
636 i40e_init_flx_pld(pf); /* set flex config to default value */
638 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
639 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
641 PMD_DRV_LOG(ERR, "invalid configuration arguments.");
644 /* configure flex payload */
645 for (i = 0; i < conf->nb_payloads; i++)
646 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
647 /* configure flex mask */
648 for (i = 0; i < conf->nb_flexmasks; i++) {
649 if (hw->mac.type == I40E_MAC_X722) {
650 /* get translated pctype value in fd pctype register */
651 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
652 hw, I40E_GLQF_FD_PCTYPES(
653 (int)i40e_flowtype_to_pctype(pf->adapter,
654 conf->flex_mask[i].flow_type)));
656 pctype = i40e_flowtype_to_pctype(pf->adapter,
657 conf->flex_mask[i].flow_type);
659 i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
666 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
667 unsigned char *raw_pkt,
670 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
671 uint16_t *ether_type;
672 uint8_t len = 2 * sizeof(struct ether_addr);
674 struct ipv6_hdr *ip6;
675 static const uint8_t next_proto[] = {
676 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
677 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
678 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
679 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
680 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
681 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
682 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
683 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
684 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
685 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
688 raw_pkt += 2 * sizeof(struct ether_addr);
689 if (vlan && fdir_input->flow_ext.vlan_tci) {
690 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
691 rte_memcpy(raw_pkt + sizeof(uint16_t),
692 &fdir_input->flow_ext.vlan_tci,
694 raw_pkt += sizeof(vlan_frame);
695 len += sizeof(vlan_frame);
697 ether_type = (uint16_t *)raw_pkt;
698 raw_pkt += sizeof(uint16_t);
699 len += sizeof(uint16_t);
701 switch (fdir_input->flow_type) {
702 case RTE_ETH_FLOW_L2_PAYLOAD:
703 *ether_type = fdir_input->flow.l2_flow.ether_type;
705 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
706 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
707 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
708 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
709 case RTE_ETH_FLOW_FRAG_IPV4:
710 ip = (struct ipv4_hdr *)raw_pkt;
712 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
713 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
714 /* set the total length to the default value */
715 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
716 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
717 fdir_input->flow.ip4_flow.proto :
718 next_proto[fdir_input->flow_type];
719 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
720 fdir_input->flow.ip4_flow.ttl :
721 I40E_FDIR_IP_DEFAULT_TTL;
722 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
724 * The source and destination fields in the transmitted packet
725 * need to be presented in a reversed order with respect
726 * to the expected received packets.
728 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
729 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
730 len += sizeof(struct ipv4_hdr);
732 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
733 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
734 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
735 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
736 case RTE_ETH_FLOW_FRAG_IPV6:
737 ip6 = (struct ipv6_hdr *)raw_pkt;
739 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
741 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
742 (fdir_input->flow.ipv6_flow.tc <<
743 I40E_FDIR_IPv6_TC_OFFSET));
745 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
746 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
747 fdir_input->flow.ipv6_flow.proto :
748 next_proto[fdir_input->flow_type];
749 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
750 fdir_input->flow.ipv6_flow.hop_limits :
751 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
753 * The source and destination fields in the transmitted packet
754 * need to be presented in a reversed order with respect
755 * to the expected received packets.
757 rte_memcpy(&(ip6->src_addr),
758 &(fdir_input->flow.ipv6_flow.dst_ip),
760 rte_memcpy(&(ip6->dst_addr),
761 &(fdir_input->flow.ipv6_flow.src_ip),
763 len += sizeof(struct ipv6_hdr);
766 PMD_DRV_LOG(ERR, "unknown flow type %u.",
767 fdir_input->flow_type);
775 * i40e_fdir_construct_pkt - construct packet based on fields in input
776 * @pf: board private structure
777 * @fdir_input: input set of the flow director entry
778 * @raw_pkt: a packet to be constructed
781 i40e_fdir_construct_pkt(struct i40e_pf *pf,
782 const struct rte_eth_fdir_input *fdir_input,
783 unsigned char *raw_pkt)
785 unsigned char *payload, *ptr;
788 struct sctp_hdr *sctp;
789 uint8_t size, dst = 0;
790 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
793 /* fill the Ethernet and IP header */
794 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
795 !!fdir_input->flow_ext.vlan_tci);
799 /* fill the L4 header */
800 switch (fdir_input->flow_type) {
801 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
802 udp = (struct udp_hdr *)(raw_pkt + len);
803 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
805 * The source and destination fields in the transmitted packet
806 * need to be presented in a reversed order with respect
807 * to the expected received packets.
809 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
810 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
811 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
814 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
815 tcp = (struct tcp_hdr *)(raw_pkt + len);
816 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
818 * The source and destination fields in the transmitted packet
819 * need to be presented in a reversed order with respect
820 * to the expected received packets.
822 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
823 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
824 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
827 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
828 sctp = (struct sctp_hdr *)(raw_pkt + len);
829 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
831 * The source and destination fields in the transmitted packet
832 * need to be presented in a reversed order with respect
833 * to the expected received packets.
835 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
836 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
837 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
840 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
841 case RTE_ETH_FLOW_FRAG_IPV4:
842 payload = raw_pkt + len;
843 set_idx = I40E_FLXPLD_L3_IDX;
846 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
847 udp = (struct udp_hdr *)(raw_pkt + len);
848 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
850 * The source and destination fields in the transmitted packet
851 * need to be presented in a reversed order with respect
852 * to the expected received packets.
854 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
855 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
856 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
859 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
860 tcp = (struct tcp_hdr *)(raw_pkt + len);
861 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
863 * The source and destination fields in the transmitted packet
864 * need to be presented in a reversed order with respect
865 * to the expected received packets.
867 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
868 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
869 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
872 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
873 sctp = (struct sctp_hdr *)(raw_pkt + len);
874 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
876 * The source and destination fields in the transmitted packet
877 * need to be presented in a reversed order with respect
878 * to the expected received packets.
880 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
881 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
882 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
885 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
886 case RTE_ETH_FLOW_FRAG_IPV6:
887 payload = raw_pkt + len;
888 set_idx = I40E_FLXPLD_L3_IDX;
890 case RTE_ETH_FLOW_L2_PAYLOAD:
891 payload = raw_pkt + len;
893 * ARP packet is a special case in which the payload
894 * starts after the whole ARP header
896 if (fdir_input->flow.l2_flow.ether_type ==
897 rte_cpu_to_be_16(ETHER_TYPE_ARP))
898 payload += sizeof(struct arp_hdr);
899 set_idx = I40E_FLXPLD_L2_IDX;
902 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
906 /* fill the flex bytes into the payload */
907 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
908 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
909 size = pf->fdir.flex_set[pit_idx].size;
912 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
914 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
916 &fdir_input->flow_ext.flexbytes[dst],
917 size * sizeof(uint16_t));
923 static struct i40e_customized_pctype *
924 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
926 struct i40e_customized_pctype *cus_pctype;
927 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
929 for (; i < I40E_CUSTOMIZED_MAX; i++) {
930 cus_pctype = &pf->customized_pctype[i];
931 if (pctype == cus_pctype->pctype)
938 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
939 const struct i40e_fdir_input *fdir_input,
940 unsigned char *raw_pkt,
943 struct i40e_customized_pctype *cus_pctype = NULL;
944 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
945 uint16_t *ether_type;
946 uint8_t len = 2 * sizeof(struct ether_addr);
948 struct ipv6_hdr *ip6;
949 uint8_t pctype = fdir_input->pctype;
950 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
951 static const uint8_t next_proto[] = {
952 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
953 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
954 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
955 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
956 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
957 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
958 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
959 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
960 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
961 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
964 raw_pkt += 2 * sizeof(struct ether_addr);
965 if (vlan && fdir_input->flow_ext.vlan_tci) {
966 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
967 rte_memcpy(raw_pkt + sizeof(uint16_t),
968 &fdir_input->flow_ext.vlan_tci,
970 raw_pkt += sizeof(vlan_frame);
971 len += sizeof(vlan_frame);
973 ether_type = (uint16_t *)raw_pkt;
974 raw_pkt += sizeof(uint16_t);
975 len += sizeof(uint16_t);
977 if (is_customized_pctype) {
978 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
980 PMD_DRV_LOG(ERR, "unknown pctype %u.",
986 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
987 *ether_type = fdir_input->flow.l2_flow.ether_type;
988 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
989 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
990 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
991 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
992 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
993 is_customized_pctype) {
994 ip = (struct ipv4_hdr *)raw_pkt;
996 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
997 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
998 /* set the total length to the default value */
999 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1000 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1001 fdir_input->flow.ip4_flow.ttl :
1002 I40E_FDIR_IP_DEFAULT_TTL;
1003 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
1005 * The source and destination fields in the transmitted packet
1006 * need to be presented in a reversed order with respect
1007 * to the expected received packets.
1009 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1010 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1012 if (!is_customized_pctype)
1013 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1014 fdir_input->flow.ip4_flow.proto :
1015 next_proto[fdir_input->pctype];
1016 else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1017 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1018 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1019 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
1020 ip->next_proto_id = IPPROTO_UDP;
1021 len += sizeof(struct ipv4_hdr);
1022 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1023 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1024 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1025 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1026 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1027 ip6 = (struct ipv6_hdr *)raw_pkt;
1029 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
1031 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1032 (fdir_input->flow.ipv6_flow.tc <<
1033 I40E_FDIR_IPv6_TC_OFFSET));
1035 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1036 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1037 fdir_input->flow.ipv6_flow.proto :
1038 next_proto[fdir_input->pctype];
1039 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1040 fdir_input->flow.ipv6_flow.hop_limits :
1041 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1043 * The source and destination fields in the transmitted packet
1044 * need to be presented in a reversed order with respect
1045 * to the expected received packets.
1047 rte_memcpy(&ip6->src_addr,
1048 &fdir_input->flow.ipv6_flow.dst_ip,
1050 rte_memcpy(&ip6->dst_addr,
1051 &fdir_input->flow.ipv6_flow.src_ip,
1053 len += sizeof(struct ipv6_hdr);
1055 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1056 fdir_input->pctype);
1064 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1065 * @pf: board private structure
1066 * @fdir_input: input set of the flow director entry
1067 * @raw_pkt: a packet to be constructed
1070 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1071 const struct i40e_fdir_input *fdir_input,
1072 unsigned char *raw_pkt)
1074 unsigned char *payload = NULL;
1076 struct udp_hdr *udp;
1077 struct tcp_hdr *tcp;
1078 struct sctp_hdr *sctp;
1079 struct rte_flow_item_gtp *gtp;
1080 struct ipv4_hdr *gtp_ipv4;
1081 struct ipv6_hdr *gtp_ipv6;
1082 uint8_t size, dst = 0;
1083 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
1085 uint8_t pctype = fdir_input->pctype;
1086 struct i40e_customized_pctype *cus_pctype;
1088 /* raw packet template - just copy contents of the raw packet */
1089 if (fdir_input->flow_ext.pkt_template) {
1090 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
1091 fdir_input->flow.raw_flow.length);
1095 /* fill the Ethernet and IP header */
1096 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1097 !!fdir_input->flow_ext.vlan_tci);
1101 /* fill the L4 header */
1102 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1103 udp = (struct udp_hdr *)(raw_pkt + len);
1104 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1106 * The source and destination fields in the transmitted packet
1107 * need to be presented in a reversed order with respect
1108 * to the expected received packets.
1110 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1111 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1112 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1113 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1114 tcp = (struct tcp_hdr *)(raw_pkt + len);
1115 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1117 * The source and destination fields in the transmitted packet
1118 * need to be presented in a reversed order with respect
1119 * to the expected received packets.
1121 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1122 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1123 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1124 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1125 sctp = (struct sctp_hdr *)(raw_pkt + len);
1126 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1128 * The source and destination fields in the transmitted packet
1129 * need to be presented in a reversed order with respect
1130 * to the expected received packets.
1132 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1133 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1134 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1135 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1136 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1137 payload = raw_pkt + len;
1138 set_idx = I40E_FLXPLD_L3_IDX;
1139 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1140 udp = (struct udp_hdr *)(raw_pkt + len);
1141 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1143 * The source and destination fields in the transmitted packet
1144 * need to be presented in a reversed order with respect
1145 * to the expected received packets.
1147 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1148 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1149 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1150 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1151 tcp = (struct tcp_hdr *)(raw_pkt + len);
1152 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1154 * The source and destination fields in the transmitted packet
1155 * need to be presented in a reversed order with respect
1156 * to the expected received packets.
1158 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1159 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1160 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1161 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1162 sctp = (struct sctp_hdr *)(raw_pkt + len);
1163 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1165 * The source and destination fields in the transmitted packet
1166 * need to be presented in a reversed order with respect
1167 * to the expected received packets.
1169 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1170 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1171 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1172 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1173 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1174 payload = raw_pkt + len;
1175 set_idx = I40E_FLXPLD_L3_IDX;
1176 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1177 payload = raw_pkt + len;
1179 * ARP packet is a special case in which the payload
1180 * starts after the whole ARP header
1182 if (fdir_input->flow.l2_flow.ether_type ==
1183 rte_cpu_to_be_16(ETHER_TYPE_ARP))
1184 payload += sizeof(struct arp_hdr);
1185 set_idx = I40E_FLXPLD_L2_IDX;
1186 } else if (fdir_input->flow_ext.customized_pctype) {
1187 /* If customized pctype is used */
1188 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1189 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1190 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1191 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1192 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1193 udp = (struct udp_hdr *)(raw_pkt + len);
1195 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1197 gtp = (struct rte_flow_item_gtp *)
1198 ((unsigned char *)udp + sizeof(struct udp_hdr));
1200 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1201 gtp->teid = fdir_input->flow.gtp_flow.teid;
1202 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1204 /* GTP-C message type is not supported. */
1205 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1207 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1208 gtp->v_pt_rsv_flags =
1209 I40E_FDIR_GTP_VER_FLAG_0X32;
1212 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1213 gtp->v_pt_rsv_flags =
1214 I40E_FDIR_GTP_VER_FLAG_0X30;
1217 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1218 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1219 gtp_ipv4 = (struct ipv4_hdr *)
1220 ((unsigned char *)gtp +
1221 sizeof(struct rte_flow_item_gtp));
1222 gtp_ipv4->version_ihl =
1223 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1224 gtp_ipv4->next_proto_id = IPPROTO_IP;
1225 gtp_ipv4->total_length =
1227 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1228 payload = (unsigned char *)gtp_ipv4 +
1229 sizeof(struct ipv4_hdr);
1230 } else if (cus_pctype->index ==
1231 I40E_CUSTOMIZED_GTPU_IPV6) {
1232 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1233 gtp_ipv6 = (struct ipv6_hdr *)
1234 ((unsigned char *)gtp +
1235 sizeof(struct rte_flow_item_gtp));
1236 gtp_ipv6->vtc_flow =
1238 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1239 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1240 gtp_ipv6->proto = IPPROTO_NONE;
1241 gtp_ipv6->payload_len =
1243 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1244 gtp_ipv6->hop_limits =
1245 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1246 payload = (unsigned char *)gtp_ipv6 +
1247 sizeof(struct ipv6_hdr);
1249 payload = (unsigned char *)gtp +
1250 sizeof(struct rte_flow_item_gtp);
1253 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1254 fdir_input->pctype);
1258 /* fill the flex bytes into the payload */
1259 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1260 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1261 size = pf->fdir.flex_set[pit_idx].size;
1264 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1266 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1267 (void)rte_memcpy(ptr,
1268 &fdir_input->flow_ext.flexbytes[dst],
1269 size * sizeof(uint16_t));
1275 /* Construct the TX descriptor cmd_type_offset_bsz word */
1276 static inline uint64_t
1277 i40e_build_ctob(uint32_t td_cmd,
1282 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1283 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1284 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1285 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1286 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
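/* Used below to build the dummy TX descriptor that carries the FDIR programming packet. */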
1290 * check the programming status descriptor in the rx queue.
1291 * done after a Flow Director filter is programmed on the tx queue
1295 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1297 volatile union i40e_rx_desc *rxdp;
1304 rxdp = &rxq->rx_ring[rxq->rx_tail];
1305 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1306 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1307 >> I40E_RXD_QW1_STATUS_SHIFT;
1309 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1310 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1311 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1312 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1314 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1315 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1317 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1318 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1319 if (error == (0x1 <<
1320 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1321 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1322 " (FD_ID %u): programming status"
1324 rxdp->wb.qword0.hi_dword.fd_id);
1326 } else if (error == (0x1 <<
1327 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1328 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1329 " (FD_ID %u): programming status"
1331 rxdp->wb.qword0.hi_dword.fd_id);
1334 PMD_DRV_LOG(ERR, "invalid programming status"
1335 " reported, error = %u.", error);
1337 PMD_DRV_LOG(ERR, "unknown programming status"
1338 " reported, len = %d, id = %u.", len, id);
1339 rxdp->wb.qword1.status_error_len = 0;
1341 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1348 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1349 struct i40e_fdir_filter *filter)
1351 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
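/* For raw packet templates, the packet contents are reduced to a CRC signature
 * stored in raw_flow.length; the SW-list hash lookups below use this value as
 * the precomputed hash.
 */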
1352 if (input->input.flow_ext.pkt_template) {
1353 filter->fdir.input.flow.raw_flow.packet = NULL;
1354 filter->fdir.input.flow.raw_flow.length =
1355 rte_hash_crc(input->input.flow.raw_flow.packet,
1356 input->input.flow.raw_flow.length,
1357 input->input.flow.raw_flow.pctype);
1362 /* Check whether the flow director filter already exists */
1363 static struct i40e_fdir_filter *
1364 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1365 const struct i40e_fdir_input *input)
1369 if (input->flow_ext.pkt_template)
1370 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1371 (const void *)input,
1372 input->flow.raw_flow.length);
1374 ret = rte_hash_lookup(fdir_info->hash_table,
1375 (const void *)input);
1379 return fdir_info->hash_map[ret];
1382 /* Add a flow director filter into the SW list */
1384 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1386 struct i40e_fdir_info *fdir_info = &pf->fdir;
1389 if (filter->fdir.input.flow_ext.pkt_template)
1390 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1391 &filter->fdir.input,
1392 filter->fdir.input.flow.raw_flow.length);
1394 ret = rte_hash_add_key(fdir_info->hash_table,
1395 &filter->fdir.input);
1398 "Failed to insert fdir filter to hash table %d!",
1402 fdir_info->hash_map[ret] = filter;
1404 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1409 /* Delete a flow director filter from the SW list */
1411 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1413 struct i40e_fdir_info *fdir_info = &pf->fdir;
1414 struct i40e_fdir_filter *filter;
1417 if (input->flow_ext.pkt_template)
1418 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1420 input->flow.raw_flow.length);
1422 ret = rte_hash_del_key(fdir_info->hash_table, input);
1425 "Failed to delete fdir filter to hash table %d!",
1429 filter = fdir_info->hash_map[ret];
1430 fdir_info->hash_map[ret] = NULL;
1432 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1439 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1440 * @dev: pointer to the ethernet device
1441 * @filter: fdir filter entry
1442 * @add: 0 - delete, 1 - add
1445 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1446 const struct rte_eth_fdir_filter *filter,
1449 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1450 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1451 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1452 enum i40e_filter_pctype pctype;
1455 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1456 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1457 " check the mode in fdir_conf.");
1461 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1462 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1463 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1466 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1467 PMD_DRV_LOG(ERR, "Invalid queue ID");
1470 if (filter->input.flow_ext.is_vf &&
1471 filter->input.flow_ext.dst_id >= pf->vf_num) {
1472 PMD_DRV_LOG(ERR, "Invalid VF ID");
1476 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1478 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1480 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1484 if (hw->mac.type == I40E_MAC_X722) {
1485 /* get translated pctype value in fd pctype register */
1486 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1487 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1490 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1492 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1501 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1502 * @dev: pointer to the ethernet device
1503 * @filter: fdir filter entry
1504 * @add: 0 - delete, 1 - add
1507 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1508 const struct i40e_fdir_filter_conf *filter,
1511 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1512 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1513 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1514 enum i40e_filter_pctype pctype;
1515 struct i40e_fdir_info *fdir_info = &pf->fdir;
1516 struct i40e_fdir_filter *fdir_filter, *node;
1517 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1520 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1521 PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
1525 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1526 PMD_DRV_LOG(ERR, "Invalid queue ID");
1529 if (filter->input.flow_ext.is_vf &&
1530 filter->input.flow_ext.dst_id >= pf->vf_num) {
1531 PMD_DRV_LOG(ERR, "Invalid VF ID");
1534 if (filter->input.flow_ext.pkt_template) {
1535 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1536 !filter->input.flow.raw_flow.packet) {
1537 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1538 " flow filter parameters!");
1541 pctype = filter->input.flow.raw_flow.pctype;
1543 pctype = filter->input.pctype;
1546 /* Check if the filter exists in the SW list */
1547 memset(&check_filter, 0, sizeof(check_filter));
1548 i40e_fdir_filter_convert(filter, &check_filter);
1549 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1552 "Conflict with existing flow director rules!");
1556 if (!add && !node) {
1558 "There's no corresponding flow firector filter!");
1562 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1564 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1566 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1570 if (hw->mac.type == I40E_MAC_X722) {
1571 /* get translated pctype value in fd pctype register */
1572 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1573 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1576 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1578 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1584 fdir_filter = rte_zmalloc("fdir_filter",
1585 sizeof(*fdir_filter), 0);
1586 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1587 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1589 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1596 * i40e_fdir_filter_programming - Program a flow director filter rule.
1597 * This is done via a Flow Director Programming Descriptor followed by the
1598 * packet structure that contains the filter fields that need to match.
1599 * @pf: board private structure
1601 * @filter: fdir filter entry
1602 * @add: 0 - delete, 1 - add
1605 i40e_fdir_filter_programming(struct i40e_pf *pf,
1606 enum i40e_filter_pctype pctype,
1607 const struct rte_eth_fdir_filter *filter,
1610 struct i40e_tx_queue *txq = pf->fdir.txq;
1611 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1612 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1613 volatile struct i40e_tx_desc *txdp;
1614 volatile struct i40e_filter_program_desc *fdirdp;
1619 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1620 fdirdp = (volatile struct i40e_filter_program_desc *)
1621 (&(txq->tx_ring[txq->tx_tail]));
1623 fdirdp->qindex_flex_ptype_vsi =
1624 rte_cpu_to_le_32((fdir_action->rx_queue <<
1625 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1626 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1628 fdirdp->qindex_flex_ptype_vsi |=
1629 rte_cpu_to_le_32((fdir_action->flex_off <<
1630 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1631 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1633 fdirdp->qindex_flex_ptype_vsi |=
1634 rte_cpu_to_le_32((pctype <<
1635 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1636 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1638 if (filter->input.flow_ext.is_vf)
1639 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1641 /* Use LAN VSI Id by default */
1642 vsi_id = pf->main_vsi->vsi_id;
1643 fdirdp->qindex_flex_ptype_vsi |=
1644 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1645 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1646 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1648 fdirdp->dtype_cmd_cntindex =
1649 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1652 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1653 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1654 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1656 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1657 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1658 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1660 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1661 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1662 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1663 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1664 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1665 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1667 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1668 " unsupported fdir behavior.");
1672 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1673 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1674 I40E_TXD_FLTR_QW1_DEST_MASK);
1676 fdirdp->dtype_cmd_cntindex |=
1677 rte_cpu_to_le_32((fdir_action->report_status<<
1678 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1679 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1681 fdirdp->dtype_cmd_cntindex |=
1682 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1683 fdirdp->dtype_cmd_cntindex |=
1685 ((uint32_t)pf->fdir.match_counter_index <<
1686 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1687 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1689 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1691 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1692 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1693 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1694 td_cmd = I40E_TX_DESC_CMD_EOP |
1695 I40E_TX_DESC_CMD_RS |
1696 I40E_TX_DESC_CMD_DUMMY;
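/* The DUMMY command marks the packet as a filter programming packet,
 * not one to be sent on the wire.
 */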
1698 txdp->cmd_type_offset_bsz =
1699 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1701 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1702 if (txq->tx_tail >= txq->nb_tx_desc)
1704 /* Update the tx tail register */
1706 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1707 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1708 if ((txdp->cmd_type_offset_bsz &
1709 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1710 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1714 if (i >= I40E_FDIR_MAX_WAIT_US) {
1715 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1716 " time out to get DD on tx queue.");
1719 /* keep polling the programming status, up to 10 ms in total */
1720 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1721 if (i40e_check_fdir_programming_status(rxq) >= 0)
1726 "Failed to program FDIR filter: programming status reported.");
1731 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1732 * This is done via a Flow Director Programming Descriptor followed by the
1733 * packet structure that contains the filter fields that need to match.
1734 * @pf: board private structure
1736 * @filter: fdir filter entry
1737 * @add: 0 - delete, 1 - add
1740 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1741 enum i40e_filter_pctype pctype,
1742 const struct i40e_fdir_filter_conf *filter,
1745 struct i40e_tx_queue *txq = pf->fdir.txq;
1746 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1747 const struct i40e_fdir_action *fdir_action = &filter->action;
1748 volatile struct i40e_tx_desc *txdp;
1749 volatile struct i40e_filter_program_desc *fdirdp;
1754 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1755 fdirdp = (volatile struct i40e_filter_program_desc *)
1756 (&txq->tx_ring[txq->tx_tail]);
1758 fdirdp->qindex_flex_ptype_vsi =
1759 rte_cpu_to_le_32((fdir_action->rx_queue <<
1760 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1761 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1763 fdirdp->qindex_flex_ptype_vsi |=
1764 rte_cpu_to_le_32((fdir_action->flex_off <<
1765 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1766 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1768 fdirdp->qindex_flex_ptype_vsi |=
1769 rte_cpu_to_le_32((pctype <<
1770 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1771 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1773 if (filter->input.flow_ext.is_vf)
1774 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1776 /* Use LAN VSI Id by default */
1777 vsi_id = pf->main_vsi->vsi_id;
1778 fdirdp->qindex_flex_ptype_vsi |=
1779 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1780 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1781 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1783 fdirdp->dtype_cmd_cntindex =
1784 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1787 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1788 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1789 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1791 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1792 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1793 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1795 if (fdir_action->behavior == I40E_FDIR_REJECT)
1796 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1797 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1798 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1799 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1800 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1802 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1806 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1807 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1808 I40E_TXD_FLTR_QW1_DEST_MASK);
1810 fdirdp->dtype_cmd_cntindex |=
1811 rte_cpu_to_le_32((fdir_action->report_status <<
1812 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1813 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1815 fdirdp->dtype_cmd_cntindex |=
1816 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1817 fdirdp->dtype_cmd_cntindex |=
1819 ((uint32_t)pf->fdir.match_counter_index <<
1820 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1821 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1823 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1825 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1826 txdp = &txq->tx_ring[txq->tx_tail + 1];
1827 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1828 td_cmd = I40E_TX_DESC_CMD_EOP |
1829 I40E_TX_DESC_CMD_RS |
1830 I40E_TX_DESC_CMD_DUMMY;
1832 txdp->cmd_type_offset_bsz =
1833 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1835 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1836 if (txq->tx_tail >= txq->nb_tx_desc)
1838 /* Update the tx tail register */
1840 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
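/* Poll until the dummy TX descriptor reports DESC_DONE, for at most
 * I40E_FDIR_MAX_WAIT_US iterations.
 */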
1841 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1842 if ((txdp->cmd_type_offset_bsz &
1843 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1844 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1848 if (i >= I40E_FDIR_MAX_WAIT_US) {
1850 "Failed to program FDIR filter: time out to get DD on tx queue.");
1853 /* delay 10 ms in total before checking the programming status */
1854 rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1855 if (i40e_check_fdir_programming_status(rxq) < 0) {
1857 "Failed to program FDIR filter: programming status reported.");
1865 * i40e_fdir_flush - clear all filters of Flow Director table
1866 * @dev: pointer to the ethernet device
1869 i40e_fdir_flush(struct rte_eth_dev *dev)
1871 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1872 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1874 uint16_t guarant_cnt, best_cnt;
1877 I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1878 I40E_WRITE_FLUSH(hw);
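/* Poll until HW clears the CLEARFDTABLE bit, indicating the flush completed. */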
1880 for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1881 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1882 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1883 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1886 if (i >= I40E_FDIR_FLUSH_RETRY) {
1887 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1890 guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1891 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1892 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1893 best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1894 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
1895 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
1896 if (guarant_cnt != 0 || best_cnt != 0) {
1897 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
1900 PMD_DRV_LOG(INFO, "FD table Flush success.");
1905 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
1906 struct rte_eth_flex_payload_cfg *flex_set,
1909 struct i40e_fdir_flex_pit *flex_pit;
1910 struct rte_eth_flex_payload_cfg *ptr = flex_set;
1911 uint16_t src, dst, size, j, k;
1912 uint8_t i, layer_idx;
1914 for (layer_idx = I40E_FLXPLD_L2_IDX;
1915 layer_idx <= I40E_FLXPLD_L4_IDX;
1917 if (layer_idx == I40E_FLXPLD_L2_IDX)
1918 ptr->type = RTE_ETH_L2_PAYLOAD;
1919 else if (layer_idx == I40E_FLXPLD_L3_IDX)
1920 ptr->type = RTE_ETH_L3_PAYLOAD;
1921 else if (layer_idx == I40E_FLXPLD_L4_IDX)
1922 ptr->type = RTE_ETH_L4_PAYLOAD;
1924 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1925 flex_pit = &pf->fdir.flex_set[layer_idx *
1926 I40E_MAX_FLXPLD_FIED + i];
1927 if (flex_pit->size == 0)
1929 src = flex_pit->src_offset * sizeof(uint16_t);
1930 dst = flex_pit->dst_offset * sizeof(uint16_t);
1931 size = flex_pit->size * sizeof(uint16_t);
1932 for (j = src, k = dst; j < src + size; j++, k++)
1933 ptr->src_offset[k] = j;
1941 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
1942 struct rte_eth_fdir_flex_mask *flex_mask,
1945 struct i40e_fdir_flex_mask *mask;
1946 struct rte_eth_fdir_flex_mask *ptr = flex_mask;
1949 uint16_t off_bytes, mask_tmp;
1951 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1952 i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
1954 mask = &pf->fdir.flex_mask[i];
1955 flow_type = i40e_pctype_to_flowtype(pf->adapter,
1956 (enum i40e_filter_pctype)i);
1957 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
1960 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
1961 if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
1962 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
1963 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
1965 ptr->mask[j * sizeof(uint16_t)] = 0x0;
1966 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
1969 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
1970 off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
1971 mask_tmp = ~mask->bitmask[j].mask;
1972 ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
1973 ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
1975 ptr->flow_type = flow_type;
1982 * i40e_fdir_info_get - get information of Flow Director
1983 * @dev: ethernet device to get info from
1984 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
1985 * the flow director information.
1988 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
1990 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1991 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1992 uint16_t num_flex_set = 0;
1993 uint16_t num_flex_mask = 0;
1995 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
1996 fdir->mode = RTE_FDIR_MODE_PERFECT;
1998 fdir->mode = RTE_FDIR_MODE_NONE;
2001 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2003 (uint32_t)hw->func_caps.fd_filters_best_effort;
2004 fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2005 fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2006 fdir->flex_payload_unit = sizeof(uint16_t);
2007 fdir->flex_bitmask_unit = sizeof(uint16_t);
2008 fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2009 fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2010 fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2012 i40e_fdir_info_get_flex_set(pf,
2013 fdir->flex_conf.flex_set,
2015 i40e_fdir_info_get_flex_mask(pf,
2016 fdir->flex_conf.flex_mask,
2019 fdir->flex_conf.nb_payloads = num_flex_set;
2020 fdir->flex_conf.nb_flexmasks = num_flex_mask;
2024 * i40e_fdir_stat_get - get statistics of Flow Director
2025 * @dev: ethernet device to get info from
2026 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2027 * the flow director statistics.
2030 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2032 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2033 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2036 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2038 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2039 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2041 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2042 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2046 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2047 struct rte_eth_fdir_filter_info *info)
2049 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2053 PMD_DRV_LOG(ERR, "Invalid pointer");
2057 switch (info->info_type) {
2058 case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2059 ret = i40e_fdir_filter_inset_select(pf,
2060 &(info->info.input_set_conf));
2063 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2072 * i40e_fdir_ctrl_func - deal with all operations on flow director.
2073 * @dev: pointer to the ethernet device
2074 * @filter_op: operation to be taken.
2075 * @arg: a pointer to the specific structure corresponding to the filter_op
2078 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2079 enum rte_filter_op filter_op,
2082 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2085 if ((pf->flags & I40E_FLAG_FDIR) == 0)
2088 if (filter_op == RTE_ETH_FILTER_NOP)
2091 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2094 switch (filter_op) {
2095 case RTE_ETH_FILTER_ADD:
2096 ret = i40e_add_del_fdir_filter(dev,
2097 (struct rte_eth_fdir_filter *)arg,
2100 case RTE_ETH_FILTER_DELETE:
2101 ret = i40e_add_del_fdir_filter(dev,
2102 (struct rte_eth_fdir_filter *)arg,
2105 case RTE_ETH_FILTER_FLUSH:
2106 ret = i40e_fdir_flush(dev);
2108 case RTE_ETH_FILTER_INFO:
2109 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2111 case RTE_ETH_FILTER_SET:
2112 ret = i40e_fdir_filter_set(dev,
2113 (struct rte_eth_fdir_filter_info *)arg);
2115 case RTE_ETH_FILTER_STATS:
2116 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2119 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2126 /* Restore flow director filters */
2128 i40e_fdir_filter_restore(struct i40e_pf *pf)
2130 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2131 struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2132 struct i40e_fdir_filter *f;
2133 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2135 uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
2136 uint32_t best_cnt; /**< Number of filters in best effort spaces. */
2138 TAILQ_FOREACH(f, fdir_list, rules)
2139 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2141 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2143 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2144 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2146 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2147 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2149 PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
2150 guarant_cnt, best_cnt);