1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
13 #include <rte_ether.h>
14 #include <ethdev_driver.h>
16 #include <rte_memzone.h>
17 #include <rte_malloc.h>
23 #include <rte_hash_crc.h>
24 #include <rte_bitmap.h>
26 #include "i40e_logs.h"
27 #include "base/i40e_type.h"
28 #include "base/i40e_prototype.h"
29 #include "i40e_ethdev.h"
30 #include "i40e_rxtx.h"
32 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
34 #define IPV6_ADDR_LEN 16
38 #define IPPROTO_L2TP 115
41 #define I40E_FDIR_PKT_LEN 512
42 #define I40E_FDIR_IP_DEFAULT_LEN 420
43 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
44 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
45 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
46 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
48 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
49 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
50 #define I40E_FDIR_UDP_DEFAULT_LEN 400
51 #define I40E_FDIR_GTP_DEFAULT_LEN 384
52 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
53 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
55 #define I40E_FDIR_GTPC_DST_PORT 2123
56 #define I40E_FDIR_GTPU_DST_PORT 2152
57 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
58 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
59 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
60 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
62 #define I40E_FDIR_ESP_DST_PORT 4500
64 /* Wait time for fdir filter programming */
65 #define I40E_FDIR_MAX_WAIT_US 10000
67 /* Wait count and interval for fdir filter flush */
68 #define I40E_FDIR_FLUSH_RETRY 50
69 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
71 #define I40E_COUNTER_PF 2
72 /* Statistics counter index for one PF */
73 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
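/*
 * Illustrative expansion: with I40E_COUNTER_PF == 2, PF 0 uses counter
 * index 0, PF 1 uses index 2 and PF 3 uses index 6, i.e. each PF owns a
 * block of I40E_COUNTER_PF consecutive statistics counters.
 */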
75 #define I40E_FDIR_FLOWS ( \
76 (1ULL << RTE_ETH_FLOW_FRAG_IPV4) | \
77 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
78 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
79 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
80 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
81 (1ULL << RTE_ETH_FLOW_FRAG_IPV6) | \
82 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
83 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
84 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
85 (1ULL << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
86 (1ULL << RTE_ETH_FLOW_L2_PAYLOAD))
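/*
 * Illustrative use (editor's sketch): checking whether a flow type is
 * supported reduces to a bit test, e.g.
 *     (I40E_FDIR_FLOWS & (1ULL << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)) != 0
 * holds, while flow types absent from the list above fail the test.
 */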
88 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
89 struct i40e_fdir_filter *filter);
90 static struct i40e_fdir_filter *
91 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
92 const struct i40e_fdir_input *input);
93 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
94 struct i40e_fdir_filter *filter);
96 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
97 enum i40e_filter_pctype pctype,
98 const struct i40e_fdir_filter_conf *filter,
99 bool add, bool wait_status);
102 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
104 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
105 struct i40e_hmc_obj_rxq rx_ctx;
106 int err = I40E_SUCCESS;
108 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
109 /* Init the RX queue in hardware */
110 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
112 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
113 rx_ctx.qlen = rxq->nb_rx_desc;
114 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
117 rx_ctx.dtype = i40e_header_split_none;
118 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
119 rx_ctx.rxmax = I40E_ETH_MAX_LEN;
120 rx_ctx.tphrdesc_ena = 1;
121 rx_ctx.tphwdesc_ena = 1;
122 rx_ctx.tphdata_ena = 1;
123 rx_ctx.tphhead_ena = 1;
124 rx_ctx.lrxqthresh = 2;
130 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
131 if (err != I40E_SUCCESS) {
132 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
135 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
136 if (err != I40E_SUCCESS) {
137 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
140 rxq->qrx_tail = hw->hw_addr +
141 I40E_QRX_TAIL(rxq->vsi->base_queue);
144 /* Init the RX tail register. */
145 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
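/*
 * Note: writing nb_rx_desc - 1 to the tail register hands all but one
 * descriptor to hardware, the usual initial state for a fully armed ring.
 */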
151 * i40e_fdir_setup - reserve and initialize the Flow Director resources
152 * @pf: board private structure
155 i40e_fdir_setup(struct i40e_pf *pf)
157 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
158 struct i40e_vsi *vsi;
159 int err = I40E_SUCCESS;
160 char z_name[RTE_MEMZONE_NAMESIZE];
161 const struct rte_memzone *mz = NULL;
162 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
165 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
166 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
167 return I40E_NOT_SUPPORTED;
170 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
171 " num_filters_best_effort = %u.",
172 hw->func_caps.fd_filters_guaranteed,
173 hw->func_caps.fd_filters_best_effort);
175 vsi = pf->fdir.fdir_vsi;
177 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
181 /* make new FDIR VSI */
182 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
184 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
185 return I40E_ERR_NO_AVAILABLE_VSI;
187 pf->fdir.fdir_vsi = vsi;
189 /* FDIR TX queue setup */
190 err = i40e_fdir_setup_tx_resources(pf);
192 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
196 /* FDIR RX queue setup */
197 err = i40e_fdir_setup_rx_resources(pf);
199 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
203 err = i40e_tx_queue_init(pf->fdir.txq);
205 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
209 /* need to switch on before dev start */
210 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
212 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
216 /* Init the rx queue in hardware */
217 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
219 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
223 /* switch on rx queue */
224 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
226 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
230 /* enable FDIR MSIX interrupt */
231 vsi->nb_used_qps = 1;
232 i40e_vsi_queues_bind_intr(vsi, I40E_ITR_INDEX_NONE);
233 i40e_vsi_enable_queues_intr(vsi);
235 /* reserve memory for the fdir programming packet */
236 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
237 eth_dev->device->driver->name,
239 eth_dev->data->port_id);
240 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN *
241 I40E_FDIR_PRG_PKT_CNT, SOCKET_ID_ANY);
243 PMD_DRV_LOG(ERR, "Cannot init memzone for "
244 "flow director program packet.");
245 err = I40E_ERR_NO_MEMORY;
249 for (i = 0; i < I40E_FDIR_PRG_PKT_CNT; i++) {
250 pf->fdir.prg_pkt[i] = (uint8_t *)mz->addr +
251 I40E_FDIR_PKT_LEN * i;
252 pf->fdir.dma_addr[i] = mz->iova +
253 I40E_FDIR_PKT_LEN * i;
256 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
257 pf->fdir.fdir_actual_cnt = 0;
258 pf->fdir.fdir_guarantee_free_space =
259 pf->fdir.fdir_guarantee_total_space;
261 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
266 i40e_dev_rx_queue_release(pf->fdir.rxq);
269 i40e_dev_tx_queue_release(pf->fdir.txq);
272 i40e_vsi_release(vsi);
273 pf->fdir.fdir_vsi = NULL;
278 * i40e_fdir_teardown - release the Flow Director resources
279 * @pf: board private structure
282 i40e_fdir_teardown(struct i40e_pf *pf)
284 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
285 struct i40e_vsi *vsi;
286 struct rte_eth_dev *dev = pf->adapter->eth_dev;
288 vsi = pf->fdir.fdir_vsi;
292 /* disable FDIR MSIX interrupt */
293 i40e_vsi_queues_unbind_intr(vsi);
294 i40e_vsi_disable_queues_intr(vsi);
296 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
298 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
299 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
301 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
303 i40e_dev_rx_queue_release(pf->fdir.rxq);
304 rte_eth_dma_zone_free(dev, "fdir_rx_ring", pf->fdir.rxq->queue_id);
306 i40e_dev_tx_queue_release(pf->fdir.txq);
307 rte_eth_dma_zone_free(dev, "fdir_tx_ring", pf->fdir.txq->queue_id);
309 i40e_vsi_release(vsi);
310 pf->fdir.fdir_vsi = NULL;
313 /* check whether the flow director table is empty */
315 i40e_fdir_empty(struct i40e_hw *hw)
317 uint32_t guarant_cnt, best_cnt;
319 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
320 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
321 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
322 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
323 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
324 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
325 if (best_cnt + guarant_cnt > 0)
332 * Initialize the configuration of the bytes stream extracted as flexible payload
336 i40e_init_flx_pld(struct i40e_pf *pf)
338 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
344 * Define the bytes stream extracted as flexible payload in
345 * field vector. By default, select 8 words from the beginning
346 * of payload as flexible payload.
348 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
349 index = i * I40E_MAX_FLXPLD_FIED;
350 pf->fdir.flex_set[index].src_offset = 0;
351 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
352 pf->fdir.flex_set[index].dst_offset = 0;
353 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
355 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* non-used */
357 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* non-used */
358 pf->fdir.flex_pit_flag[i] = 0;
361 /* initialize the masks */
362 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
363 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
364 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
366 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
368 pf->fdir.flex_mask[pctype].word_mask = 0;
369 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
370 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
371 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
372 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
373 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
378 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
379 if ((flex_pit2).src_offset < \
380 (flex_pit1).src_offset + (flex_pit1).size) { \
381 PMD_DRV_LOG(ERR, "src_offset should be not" \
382 " less than than previous offset" \
383 " + previous FSIZE."); \
389 * i40e_srcoff_to_flx_pit - transform the src_offset array into flex_pit
390 * structures, sorted by their src_offset values
392 static inline uint16_t
393 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
394 struct i40e_fdir_flex_pit *flex_pit)
396 uint16_t src_tmp, size, num = 0;
397 uint16_t i, k, j = 0;
399 while (j < I40E_FDIR_MAX_FLEX_LEN) {
401 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
402 if (src_offset[j + 1] == src_offset[j] + 1)
407 src_tmp = src_offset[j] + 1 - size;
408 /* the flex_pit entries need to be sorted by src_offset */
409 for (i = 0; i < num; i++) {
410 if (src_tmp < flex_pit[i].src_offset)
413 /* if an insert is required, move the later entries backward */
414 for (k = num; k > i; k--)
415 flex_pit[k] = flex_pit[k - 1];
417 flex_pit[i].dst_offset = j + 1 - size;
418 flex_pit[i].src_offset = src_tmp;
419 flex_pit[i].size = size;
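/*
 * Worked example (illustrative): a src_offset array beginning
 * {6, 7, 2, 3, 4, ...} contains two runs of consecutive offsets, which
 * are grouped into fields and then sorted by src_offset:
 *     flex_pit[0] = { .src_offset = 2, .size = 3, .dst_offset = 2 }
 *     flex_pit[1] = { .src_offset = 6, .size = 2, .dst_offset = 0 }
 * so num is 2 for those positions.
 */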
426 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
428 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
430 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
433 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
434 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
435 PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
440 memset(flex_pit, 0, sizeof(flex_pit));
441 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
442 if (num > I40E_MAX_FLXPLD_FIED) {
443 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
446 for (i = 0; i < num; i++) {
447 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
448 flex_pit[i].src_offset & 0x01) {
449 PMD_DRV_LOG(ERR, "flexpayload should be measured"
454 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
460 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
461 * arguments are valid
464 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
465 const struct rte_eth_fdir_flex_conf *conf)
467 const struct rte_eth_flex_payload_cfg *flex_cfg;
468 const struct rte_eth_fdir_flex_mask *flex_mask;
473 enum i40e_filter_pctype pctype;
476 PMD_DRV_LOG(INFO, "NULL pointer.");
479 /* check flexible payload setting configuration */
480 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
481 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
484 for (i = 0; i < conf->nb_payloads; i++) {
485 flex_cfg = &conf->flex_set[i];
486 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
487 PMD_DRV_LOG(ERR, "invalid payload type.");
490 ret = i40e_check_fdir_flex_payload(flex_cfg);
492 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
497 /* check flex mask setting configuration */
498 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
499 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
502 for (i = 0; i < conf->nb_flexmasks; i++) {
503 flex_mask = &conf->flex_mask[i];
504 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
505 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
506 PMD_DRV_LOG(WARNING, "invalid flow type.");
510 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
511 mask_tmp = I40E_WORD(flex_mask->mask[j],
512 flex_mask->mask[j + 1]);
513 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
515 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
516 PMD_DRV_LOG(ERR, " exceed maximal"
517 " number of bitmasks.");
527 * i40e_set_flx_pld_cfg - configure how the bytes stream is extracted as flexible payload
528 * @pf: board private structure
529 * @cfg: the rule for how the bytes stream is extracted as flexible payload
532 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
533 const struct rte_eth_flex_payload_cfg *cfg)
535 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
536 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
537 uint32_t flx_pit, flx_ort;
538 uint16_t num, min_next_off; /* in words */
539 uint8_t field_idx = 0;
540 uint8_t layer_idx = 0;
543 if (cfg->type == RTE_ETH_L2_PAYLOAD)
544 layer_idx = I40E_FLXPLD_L2_IDX;
545 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
546 layer_idx = I40E_FLXPLD_L3_IDX;
547 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
548 layer_idx = I40E_FLXPLD_L4_IDX;
550 memset(flex_pit, 0, sizeof(flex_pit));
551 num = RTE_MIN(i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit),
555 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
556 (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
557 (layer_idx * I40E_MAX_FLXPLD_FIED);
558 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
561 for (i = 0; i < num; i++) {
562 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
563 /* record the info in fdir structure */
564 pf->fdir.flex_set[field_idx].src_offset =
565 flex_pit[i].src_offset / sizeof(uint16_t);
566 pf->fdir.flex_set[field_idx].size =
567 flex_pit[i].size / sizeof(uint16_t);
568 pf->fdir.flex_set[field_idx].dst_offset =
569 flex_pit[i].dst_offset / sizeof(uint16_t);
570 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
571 pf->fdir.flex_set[field_idx].size,
572 pf->fdir.flex_set[field_idx].dst_offset);
574 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
576 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
577 pf->fdir.flex_set[field_idx].size;
579 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
580 /* set the non-used register obeying register's constraint */
581 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
582 NONUSE_FLX_PIT_DEST_OFF);
584 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
591 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
592 * @pf: board private structure
593 * @pctype: packet classification type
594 * @mask_cfg: mask for flexible payload
597 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
598 enum i40e_filter_pctype pctype,
599 const struct rte_eth_fdir_flex_mask *mask_cfg)
601 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
602 struct i40e_fdir_flex_mask *flex_mask;
603 uint32_t flxinset, fd_mask;
605 uint8_t i, nb_bitmask = 0;
607 flex_mask = &pf->fdir.flex_mask[pctype];
608 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
609 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
610 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
611 if (mask_tmp != 0x0) {
612 flex_mask->word_mask |=
613 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
614 if (mask_tmp != UINT16_MAX) {
616 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
617 flex_mask->bitmask[nb_bitmask].offset =
618 i / sizeof(uint16_t);
623 /* write mask to hw */
624 flxinset = (flex_mask->word_mask <<
625 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
626 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
627 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
629 for (i = 0; i < nb_bitmask; i++) {
630 fd_mask = (flex_mask->bitmask[i].mask <<
631 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
632 I40E_PRTQF_FD_MSK_MASK_MASK;
633 fd_mask |= ((flex_mask->bitmask[i].offset +
634 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
635 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
636 I40E_PRTQF_FD_MSK_OFFSET_MASK;
637 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
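/*
 * Illustrative example: mask bytes {0xff, 0xff, 0xf0, 0x00, ...} fully
 * mask flex word 0 and partially mask flex word 1, yielding
 *     word_mask  = I40E_FLEX_WORD_MASK(0) | I40E_FLEX_WORD_MASK(1)
 *     bitmask[0] = { .offset = 1, .mask = 0x0fff }   (i.e. ~0xf000)
 * so one I40E_PRTQF_FD_MSK register is consumed for the partial word.
 */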
642 * Enable/disable flow director RX processing in vector routines.
645 i40e_fdir_rx_proc_enable(struct rte_eth_dev *dev, bool on)
649 for (i = 0; i < dev->data->nb_rx_queues; i++) {
650 struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
653 rxq->fdir_enabled = on;
655 PMD_DRV_LOG(DEBUG, "Flow Director processing on RX set to %d", on);
659 * Configure flow director related settings
662 i40e_fdir_configure(struct rte_eth_dev *dev)
664 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
665 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
666 struct rte_eth_fdir_flex_conf *conf;
667 enum i40e_filter_pctype pctype;
673 * configuration needs to be done before
674 * flow director filters are added.
675 * If filters exist, flush them.
677 if (i40e_fdir_empty(hw) < 0) {
678 ret = i40e_fdir_flush(dev);
680 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
685 /* enable FDIR filter */
686 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
687 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
688 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
690 i40e_init_flx_pld(pf); /* set flex config to default value */
692 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
693 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
695 PMD_DRV_LOG(ERR, " invalid configuration arguments.");
699 if (!pf->support_multi_driver) {
700 /* configure flex payload */
701 for (i = 0; i < conf->nb_payloads; i++)
702 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
703 /* configure flex mask */
704 for (i = 0; i < conf->nb_flexmasks; i++) {
705 if (hw->mac.type == I40E_MAC_X722) {
706 /* get pctype value in fd pctype register */
707 pctype = (enum i40e_filter_pctype)
709 I40E_GLQF_FD_PCTYPES(
710 (int)i40e_flowtype_to_pctype(
712 conf->flex_mask[i].flow_type)));
714 pctype = i40e_flowtype_to_pctype(pf->adapter,
715 conf->flex_mask[i].flow_type);
718 i40e_set_flex_mask_on_pctype(pf, pctype,
719 &conf->flex_mask[i]);
722 PMD_DRV_LOG(ERR, "Not support flexible payload.");
725 /* Enable FDIR processing in RX routines */
726 i40e_fdir_rx_proc_enable(dev, 1);
732 static struct i40e_customized_pctype *
733 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
735 struct i40e_customized_pctype *cus_pctype;
736 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
738 for (; i < I40E_CUSTOMIZED_MAX; i++) {
739 cus_pctype = &pf->customized_pctype[i];
740 if (pctype == cus_pctype->pctype)
747 fill_ip6_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
748 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
750 struct rte_ipv6_hdr *ip6;
752 ip6 = (struct rte_ipv6_hdr *)raw_pkt;
754 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
755 ip6->vtc_flow = rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
756 (fdir_input->flow.ipv6_flow.tc << I40E_FDIR_IPv6_TC_OFFSET));
757 ip6->payload_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
758 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
759 fdir_input->flow.ipv6_flow.proto : next_proto;
760 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
761 fdir_input->flow.ipv6_flow.hop_limits :
762 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
764 * The source and destination fields in the transmitted packet
765 * need to be presented in a reversed order with respect
766 * to the expected received packets.
768 rte_memcpy(&ip6->src_addr, &fdir_input->flow.ipv6_flow.dst_ip,
770 rte_memcpy(&ip6->dst_addr, &fdir_input->flow.ipv6_flow.src_ip,
772 len += sizeof(struct rte_ipv6_hdr);
778 fill_ip4_head(const struct i40e_fdir_input *fdir_input, unsigned char *raw_pkt,
779 uint8_t next_proto, uint8_t len, uint16_t *ether_type)
781 struct rte_ipv4_hdr *ip4;
783 ip4 = (struct rte_ipv4_hdr *)raw_pkt;
785 *ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
786 ip4->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
787 /* set the total length to the default value */
788 ip4->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
789 ip4->time_to_live = fdir_input->flow.ip4_flow.ttl ?
790 fdir_input->flow.ip4_flow.ttl :
791 I40E_FDIR_IP_DEFAULT_TTL;
792 ip4->type_of_service = fdir_input->flow.ip4_flow.tos;
793 ip4->next_proto_id = fdir_input->flow.ip4_flow.proto ?
794 fdir_input->flow.ip4_flow.proto : next_proto;
796 * The source and destination fields in the transmitted packet
797 * need to be presented in a reversed order with respect
798 * to the expected received packets.
800 ip4->src_addr = fdir_input->flow.ip4_flow.dst_ip;
801 ip4->dst_addr = fdir_input->flow.ip4_flow.src_ip;
802 len += sizeof(struct rte_ipv4_hdr);
808 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
809 const struct i40e_fdir_input *fdir_input,
810 unsigned char *raw_pkt,
813 struct i40e_customized_pctype *cus_pctype = NULL;
814 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
815 uint16_t *ether_type;
816 uint8_t len = 2 * sizeof(struct rte_ether_addr);
817 uint8_t pctype = fdir_input->pctype;
818 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
819 static const uint8_t next_proto[] = {
820 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
821 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
822 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
823 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
824 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
825 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
826 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
827 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
828 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
829 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
832 rte_memcpy(raw_pkt, &fdir_input->flow.l2_flow.dst,
833 sizeof(struct rte_ether_addr));
834 rte_memcpy(raw_pkt + sizeof(struct rte_ether_addr),
835 &fdir_input->flow.l2_flow.src,
836 sizeof(struct rte_ether_addr));
837 raw_pkt += 2 * sizeof(struct rte_ether_addr);
839 if (vlan && fdir_input->flow_ext.vlan_tci) {
840 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
841 rte_memcpy(raw_pkt + sizeof(uint16_t),
842 &fdir_input->flow_ext.vlan_tci,
844 raw_pkt += sizeof(vlan_frame);
845 len += sizeof(vlan_frame);
847 ether_type = (uint16_t *)raw_pkt;
848 raw_pkt += sizeof(uint16_t);
849 len += sizeof(uint16_t);
851 if (is_customized_pctype) {
852 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
854 PMD_DRV_LOG(ERR, "unknown pctype %u.",
860 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
861 *ether_type = fdir_input->flow.l2_flow.ether_type;
862 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
863 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
864 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
865 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
866 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
867 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
868 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
869 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
870 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
871 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6 ||
872 is_customized_pctype) {
873 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
874 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
875 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
876 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
877 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
878 len = fill_ip4_head(fdir_input, raw_pkt,
879 next_proto[pctype], len, ether_type);
880 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
881 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
882 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
883 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
884 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
885 len = fill_ip6_head(fdir_input, raw_pkt,
886 next_proto[pctype], len,
888 } else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
889 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
890 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
891 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
892 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
894 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3) {
895 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_L2TP,
897 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
898 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_ESP,
900 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP) {
901 len = fill_ip4_head(fdir_input, raw_pkt, IPPROTO_UDP,
906 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6)
907 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_ESP,
909 else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP)
910 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_UDP,
912 else if (cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3)
913 len = fill_ip6_head(fdir_input, raw_pkt, IPPROTO_L2TP,
916 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
924 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
925 * @pf: board private structure
926 * @fdir_input: input set of the flow director entry
927 * @raw_pkt: a packet to be constructed
930 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
931 const struct i40e_fdir_input *fdir_input,
932 unsigned char *raw_pkt)
934 unsigned char *payload = NULL;
936 struct rte_udp_hdr *udp;
937 struct rte_tcp_hdr *tcp;
938 struct rte_sctp_hdr *sctp;
939 struct rte_flow_item_gtp *gtp;
940 struct rte_ipv4_hdr *gtp_ipv4;
941 struct rte_ipv6_hdr *gtp_ipv6;
942 struct rte_flow_item_l2tpv3oip *l2tpv3oip;
943 struct rte_flow_item_esp *esp;
944 struct rte_ipv4_hdr *esp_ipv4;
945 struct rte_ipv6_hdr *esp_ipv6;
947 uint8_t size, dst = 0;
948 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
950 uint8_t pctype = fdir_input->pctype;
951 struct i40e_customized_pctype *cus_pctype;
953 /* raw packet template - just copy contents of the raw packet */
954 if (fdir_input->flow_ext.pkt_template) {
955 memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
956 fdir_input->flow.raw_flow.length);
960 /* fill the Ethernet and IP head */
961 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
962 !!fdir_input->flow_ext.vlan_tci);
966 /* fill the L4 head */
967 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
968 udp = (struct rte_udp_hdr *)(raw_pkt + len);
969 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
971 * The source and destination fields in the transmitted packet
972 * need to be presented in a reversed order with respect
973 * to the expected received packets.
975 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
976 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
977 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
978 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
979 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
980 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
982 * The source and destination fields in the transmitted packet
983 * need to be presented in a reversed order with respect
984 * to the expected received packets.
986 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
987 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
988 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
989 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
990 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
991 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
993 * The source and destination fields in the transmitted packet
994 * need to be presented in a reversed order with respect
995 * to the expected received packets.
997 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
998 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
999 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1000 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1001 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1002 payload = raw_pkt + len;
1003 set_idx = I40E_FLXPLD_L3_IDX;
1004 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1005 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1006 payload = (unsigned char *)udp + sizeof(struct rte_udp_hdr);
1008 * The source and destination fields in the transmitted packet
1009 * need to be presented in a reversed order with respect
1010 * to the expected received packets.
1012 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1013 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1014 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1015 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1016 tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
1017 payload = (unsigned char *)tcp + sizeof(struct rte_tcp_hdr);
1019 * The source and destination fields in the transmitted packet
1020 * need to be presented in a reversed order with respect
1021 * to the expected received packets.
1023 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1024 tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
1025 tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
1026 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1027 sctp = (struct rte_sctp_hdr *)(raw_pkt + len);
1028 payload = (unsigned char *)sctp + sizeof(struct rte_sctp_hdr);
1030 * The source and destination fields in the transmitted packet
1031 * need to be presented in a reversed order with respect
1032 * to the expected received packets.
1034 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1035 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1036 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1037 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1038 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1039 payload = raw_pkt + len;
1040 set_idx = I40E_FLXPLD_L3_IDX;
1041 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1042 payload = raw_pkt + len;
1044 * ARP packet is a special case in which the payload
1045 * starts after the whole ARP header
1047 if (fdir_input->flow.l2_flow.ether_type ==
1048 rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
1049 payload += sizeof(struct rte_arp_hdr);
1050 set_idx = I40E_FLXPLD_L2_IDX;
1051 } else if (fdir_input->flow_ext.customized_pctype) {
1052 /* If customized pctype is used */
1053 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1054 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1055 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1056 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1057 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1058 udp = (struct rte_udp_hdr *)(raw_pkt + len);
1060 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1062 gtp = (struct rte_flow_item_gtp *)
1063 ((unsigned char *)udp +
1064 sizeof(struct rte_udp_hdr));
1066 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1067 gtp->teid = fdir_input->flow.gtp_flow.teid;
1068 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1070 /* GTP-C message type is not supported. */
1071 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1073 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1074 gtp->v_pt_rsv_flags =
1075 I40E_FDIR_GTP_VER_FLAG_0X32;
1078 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1079 gtp->v_pt_rsv_flags =
1080 I40E_FDIR_GTP_VER_FLAG_0X30;
1083 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1084 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1085 gtp_ipv4 = (struct rte_ipv4_hdr *)
1086 ((unsigned char *)gtp +
1087 sizeof(struct rte_flow_item_gtp));
1088 gtp_ipv4->version_ihl =
1089 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1090 gtp_ipv4->next_proto_id = IPPROTO_IP;
1091 gtp_ipv4->total_length =
1093 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1094 payload = (unsigned char *)gtp_ipv4 +
1095 sizeof(struct rte_ipv4_hdr);
1096 } else if (cus_pctype->index ==
1097 I40E_CUSTOMIZED_GTPU_IPV6) {
1098 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1099 gtp_ipv6 = (struct rte_ipv6_hdr *)
1100 ((unsigned char *)gtp +
1101 sizeof(struct rte_flow_item_gtp));
1102 gtp_ipv6->vtc_flow =
1104 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1105 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1106 gtp_ipv6->proto = IPPROTO_NONE;
1107 gtp_ipv6->payload_len =
1109 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1110 gtp_ipv6->hop_limits =
1111 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1112 payload = (unsigned char *)gtp_ipv6 +
1113 sizeof(struct rte_ipv6_hdr);
1115 payload = (unsigned char *)gtp +
1116 sizeof(struct rte_flow_item_gtp);
1117 } else if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3 ||
1118 cus_pctype->index == I40E_CUSTOMIZED_IPV6_L2TPV3) {
1119 l2tpv3oip = (struct rte_flow_item_l2tpv3oip *)(raw_pkt
1122 if (cus_pctype->index == I40E_CUSTOMIZED_IPV4_L2TPV3)
1123 l2tpv3oip->session_id =
1124 fdir_input->flow.ip4_l2tpv3oip_flow.session_id;
1126 l2tpv3oip->session_id =
1127 fdir_input->flow.ip6_l2tpv3oip_flow.session_id;
1128 payload = (unsigned char *)l2tpv3oip +
1129 sizeof(struct rte_flow_item_l2tpv3oip);
1130 } else if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4 ||
1131 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6 ||
1132 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4_UDP ||
1133 cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1134 if (cus_pctype->index == I40E_CUSTOMIZED_ESP_IPV4) {
1135 esp_ipv4 = (struct rte_ipv4_hdr *)
1137 esp = (struct rte_flow_item_esp *)esp_ipv4;
1139 fdir_input->flow.esp_ipv4_flow.spi;
1140 payload = (unsigned char *)esp +
1141 sizeof(struct rte_esp_hdr);
1142 len += sizeof(struct rte_esp_hdr);
1143 } else if (cus_pctype->index ==
1144 I40E_CUSTOMIZED_ESP_IPV4_UDP) {
1145 esp_ipv4 = (struct rte_ipv4_hdr *)
1147 udp = (struct rte_udp_hdr *)esp_ipv4;
1148 udp->dst_port = rte_cpu_to_be_16
1149 (I40E_FDIR_ESP_DST_PORT);
1151 udp->dgram_len = rte_cpu_to_be_16
1152 (I40E_FDIR_UDP_DEFAULT_LEN);
1153 esp = (struct rte_flow_item_esp *)
1154 ((unsigned char *)esp_ipv4 +
1155 sizeof(struct rte_udp_hdr));
1157 fdir_input->flow.esp_ipv4_udp_flow.spi;
1158 payload = (unsigned char *)esp +
1159 sizeof(struct rte_esp_hdr);
1160 len += sizeof(struct rte_udp_hdr) +
1161 sizeof(struct rte_esp_hdr);
1162 } else if (cus_pctype->index ==
1163 I40E_CUSTOMIZED_ESP_IPV6) {
1164 esp_ipv6 = (struct rte_ipv6_hdr *)
1166 esp = (struct rte_flow_item_esp *)esp_ipv6;
1168 fdir_input->flow.esp_ipv6_flow.spi;
1169 payload = (unsigned char *)esp +
1170 sizeof(struct rte_esp_hdr);
1171 len += sizeof(struct rte_esp_hdr);
1172 } else if (cus_pctype->index ==
1173 I40E_CUSTOMIZED_ESP_IPV6_UDP) {
1174 esp_ipv6 = (struct rte_ipv6_hdr *)
1176 udp = (struct rte_udp_hdr *)esp_ipv6;
1177 udp->dst_port = rte_cpu_to_be_16
1178 (I40E_FDIR_ESP_DST_PORT);
1180 udp->dgram_len = rte_cpu_to_be_16
1181 (I40E_FDIR_UDP_DEFAULT_LEN);
1182 esp = (struct rte_flow_item_esp *)
1183 ((unsigned char *)esp_ipv6 +
1184 sizeof(struct rte_udp_hdr));
1186 fdir_input->flow.esp_ipv6_udp_flow.spi;
1187 payload = (unsigned char *)esp +
1188 sizeof(struct rte_esp_hdr);
1189 len += sizeof(struct rte_udp_hdr) +
1190 sizeof(struct rte_esp_hdr);
1194 PMD_DRV_LOG(ERR, "unknown pctype %u.", fdir_input->pctype);
1198 /* fill the flexbytes into the payload */
1199 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1200 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1201 size = pf->fdir.flex_set[pit_idx].size;
1204 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1206 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1207 (void)rte_memcpy(ptr,
1208 &fdir_input->flow_ext.flexbytes[dst],
1209 size * sizeof(uint16_t));
1215 /* Construct the tx flags */
1216 static inline uint64_t
1217 i40e_build_ctob(uint32_t td_cmd,
1222 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1223 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1224 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1225 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1226 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
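/*
 * Illustrative call (sketch): for the FDIR data descriptor built below,
 *     i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0)
 * packs I40E_TX_DESC_DTYPE_DATA in the low bits, the command flags at
 * I40E_TXD_QW1_CMD_SHIFT and the 512-byte buffer size at
 * I40E_TXD_QW1_TX_BUF_SZ_SHIFT into a single little-endian quadword.
 */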
1230 * Check the programming status descriptor in the RX queue,
1231 * done after a Flow Director programming descriptor is sent on the TX queue.
1235 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1237 volatile union i40e_rx_desc *rxdp;
1244 rxdp = &rxq->rx_ring[rxq->rx_tail];
1245 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1246 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1247 >> I40E_RXD_QW1_STATUS_SHIFT;
1249 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1250 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1251 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1252 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1254 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1255 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1257 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1258 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1259 if (error == (0x1 <<
1260 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1261 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1262 " (FD_ID %u): programming status"
1264 rxdp->wb.qword0.hi_dword.fd_id);
1266 } else if (error == (0x1 <<
1267 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1268 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1269 " (FD_ID %u): programming status"
1271 rxdp->wb.qword0.hi_dword.fd_id);
1274 PMD_DRV_LOG(ERR, "invalid programming status"
1275 " reported, error = %u.", error);
1277 PMD_DRV_LOG(INFO, "unknown programming status"
1278 " reported, len = %d, id = %u.", len, id);
1279 rxdp->wb.qword1.status_error_len = 0;
1281 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1283 if (rxq->rx_tail == 0)
1284 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1286 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
1293 i40e_fdir_programming_status_cleanup(struct i40e_rx_queue *rxq)
1295 uint16_t retry_count = 0;
1297 /* capture the previous error report (if any) from the rx ring */
1298 while ((i40e_check_fdir_programming_status(rxq) < 0) &&
1299 (++retry_count < I40E_FDIR_NUM_RX_DESC))
1300 PMD_DRV_LOG(INFO, "error report captured.");
1304 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1305 struct i40e_fdir_filter *filter)
1307 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1308 if (input->input.flow_ext.pkt_template) {
1309 filter->fdir.input.flow.raw_flow.packet = NULL;
1310 filter->fdir.input.flow.raw_flow.length =
1311 rte_hash_crc(input->input.flow.raw_flow.packet,
1312 input->input.flow.raw_flow.length,
1313 input->input.flow.raw_flow.pctype);
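/*
 * Note: for raw packet templates the packet pointer itself is not a
 * usable hash key, so a CRC over the template (seeded with the pctype)
 * is stored in the length field and later passed to
 * rte_hash_*_with_hash() as the precomputed signature.
 */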
1318 /* Check if the flow director filter exists */
1319 static struct i40e_fdir_filter *
1320 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1321 const struct i40e_fdir_input *input)
1325 if (input->flow_ext.pkt_template)
1326 ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
1327 (const void *)input,
1328 input->flow.raw_flow.length);
1330 ret = rte_hash_lookup(fdir_info->hash_table,
1331 (const void *)input);
1335 return fdir_info->hash_map[ret];
1338 /* Add a flow director filter into the SW list */
1340 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1342 struct i40e_fdir_info *fdir_info = &pf->fdir;
1343 struct i40e_fdir_filter *hash_filter;
1346 if (filter->fdir.input.flow_ext.pkt_template)
1347 ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
1348 &filter->fdir.input,
1349 filter->fdir.input.flow.raw_flow.length);
1351 ret = rte_hash_add_key(fdir_info->hash_table,
1352 &filter->fdir.input);
1355 "Failed to insert fdir filter to hash table %d!",
1360 if (fdir_info->hash_map[ret])
1363 hash_filter = &fdir_info->fdir_filter_array[ret];
1364 rte_memcpy(hash_filter, filter, sizeof(*filter));
1365 fdir_info->hash_map[ret] = hash_filter;
1366 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, hash_filter, rules);
1371 /* Delete a flow director filter from the SW list */
1373 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1375 struct i40e_fdir_info *fdir_info = &pf->fdir;
1376 struct i40e_fdir_filter *filter;
1379 if (input->flow_ext.pkt_template)
1380 ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
1382 input->flow.raw_flow.length);
1384 ret = rte_hash_del_key(fdir_info->hash_table, input);
1387 "Failed to delete fdir filter to hash table %d!",
1391 filter = fdir_info->hash_map[ret];
1392 fdir_info->hash_map[ret] = NULL;
1394 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1400 i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
1402 struct rte_flow *flow = NULL;
1408 if (fdir_info->fdir_actual_cnt >=
1409 fdir_info->fdir_space_size) {
1410 PMD_DRV_LOG(ERR, "Fdir space full");
1414 ret = rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos,
1417 /* normally this won't happen, as fdir_actual_cnt should
1418 * match the number of set bits in fdir_flow_pool,
1419 * but handle this error condition here to be safe
1422 PMD_DRV_LOG(ERR, "fdir_actual_cnt out of sync");
1426 i = rte_bsf64(slab);
1428 rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
1429 flow = &fdir_info->fdir_flow_pool.pool[pos].flow;
1431 memset(flow, 0, sizeof(struct rte_flow));
1437 i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
1438 struct rte_flow *flow)
1440 struct i40e_fdir_entry *f;
1442 f = FLOW_TO_FLOW_BITMAP(flow);
1443 rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
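/*
 * Typical usage (illustrative sketch): a flow create first calls
 *     struct rte_flow *flow = i40e_fdir_entry_pool_get(fdir_info);
 * and fails if it returns NULL (pool exhausted); the matching
 *     i40e_fdir_entry_pool_put(fdir_info, flow);
 * returns the entry to the pool on flow destroy.
 */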
1447 i40e_flow_store_flex_pit(struct i40e_pf *pf,
1448 struct i40e_fdir_flex_pit *flex_pit,
1449 enum i40e_flxpld_layer_idx layer_idx,
1454 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
1455 /* Check if the configuration conflicts with what is stored */
1456 if (pf->fdir.flex_pit_flag[layer_idx] &&
1457 (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
1458 pf->fdir.flex_set[field_idx].size != flex_pit->size ||
1459 pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
1462 /* Check if the configuration exists. */
1463 if (pf->fdir.flex_pit_flag[layer_idx] &&
1464 (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
1465 pf->fdir.flex_set[field_idx].size == flex_pit->size &&
1466 pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
1469 pf->fdir.flex_set[field_idx].src_offset =
1470 flex_pit->src_offset;
1471 pf->fdir.flex_set[field_idx].size =
1473 pf->fdir.flex_set[field_idx].dst_offset =
1474 flex_pit->dst_offset;
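/*
 * Return convention (as interpreted by the caller): a negative value
 * signals a conflict with the configuration stored by the first
 * flexible rule, a positive value means an identical configuration is
 * already stored, and 0 means the new configuration was recorded.
 */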
1480 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
1481 enum i40e_flxpld_layer_idx layer_idx,
1484 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1485 uint32_t flx_pit, flx_ort;
1486 uint16_t min_next_off = 0;
1491 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
1492 (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
1493 (layer_idx * I40E_MAX_FLXPLD_FIED);
1494 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
1498 for (i = 0; i < raw_id; i++) {
1499 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1500 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
1501 pf->fdir.flex_set[field_idx].size,
1502 pf->fdir.flex_set[field_idx].dst_offset);
1504 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1505 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
1506 pf->fdir.flex_set[field_idx].size;
1509 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
1510 /* set the non-used register obeying register's constraint */
1511 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1512 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
1513 NONUSE_FLX_PIT_DEST_OFF);
1514 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
1520 i40e_flow_store_flex_mask(struct i40e_pf *pf,
1521 enum i40e_filter_pctype pctype,
1524 struct i40e_fdir_flex_mask flex_mask;
1525 uint8_t nb_bitmask = 0;
1529 memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
1530 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
1531 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
1533 flex_mask.word_mask |=
1534 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
1535 if (mask_tmp != UINT16_MAX) {
1536 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
1537 flex_mask.bitmask[nb_bitmask].offset =
1538 i / sizeof(uint16_t);
1540 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
1545 flex_mask.nb_bitmask = nb_bitmask;
1547 if (pf->fdir.flex_mask_flag[pctype] &&
1548 (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1549 sizeof(struct i40e_fdir_flex_mask))))
1551 else if (pf->fdir.flex_mask_flag[pctype] &&
1552 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
1553 sizeof(struct i40e_fdir_flex_mask))))
1556 memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
1557 sizeof(struct i40e_fdir_flex_mask));
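/*
 * Return convention (as interpreted by the caller): -1 when the number
 * of partially masked words exceeds I40E_FDIR_BITMASK_NUM_WORD, -2 on a
 * conflict with the mask stored by the first flexible rule, a positive
 * value when the same mask is already stored, and 0 when the new mask
 * was recorded.
 */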
1562 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
1563 enum i40e_filter_pctype pctype)
1565 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1566 struct i40e_fdir_flex_mask *flex_mask;
1567 uint32_t flxinset, fd_mask;
1571 flex_mask = &pf->fdir.flex_mask[pctype];
1572 flxinset = (flex_mask->word_mask <<
1573 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
1574 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
1575 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
1577 for (i = 0; i < flex_mask->nb_bitmask; i++) {
1578 fd_mask = (flex_mask->bitmask[i].mask <<
1579 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
1580 I40E_PRTQF_FD_MSK_MASK_MASK;
1581 fd_mask |= ((flex_mask->bitmask[i].offset +
1582 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
1583 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
1584 I40E_PRTQF_FD_MSK_OFFSET_MASK;
1585 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
1588 pf->fdir.flex_mask_flag[pctype] = 1;
1591 static inline unsigned char *
1592 i40e_find_available_buffer(struct rte_eth_dev *dev)
1594 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1595 struct i40e_fdir_info *fdir_info = &pf->fdir;
1596 struct i40e_tx_queue *txq = pf->fdir.txq;
1598 /* no available buffer:
1599 * search for more available buffers starting from the current
1600 * descriptor, until an unavailable one is found
1602 if (fdir_info->txq_available_buf_count <= 0) {
1604 volatile struct i40e_tx_desc *tmp_txdp;
1606 tmp_tail = txq->tx_tail;
1607 tmp_txdp = &txq->tx_ring[tmp_tail + 1];
1610 if ((tmp_txdp->cmd_type_offset_bsz &
1611 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1612 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1613 fdir_info->txq_available_buf_count++;
1618 if (tmp_tail >= txq->nb_tx_desc)
1620 } while (tmp_tail != txq->tx_tail);
1623 if (fdir_info->txq_available_buf_count > 0)
1624 fdir_info->txq_available_buf_count--;
1627 return (unsigned char *)fdir_info->prg_pkt[txq->tx_tail >> 1];
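/*
 * Note: each programming operation consumes two TX descriptors (the
 * programming descriptor plus one data descriptor), so descriptor pair
 * N starts at tx_tail == 2 * N and maps to programming buffer
 * prg_pkt[tx_tail >> 1].
 */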
1631 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1632 * @dev: ethernet device structure
1633 * @filter: fdir filter entry
1634 * @add: 0 - delete, 1 - add
1637 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1638 const struct i40e_fdir_filter_conf *filter,
1641 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1642 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1643 enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
1644 struct i40e_fdir_info *fdir_info = &pf->fdir;
1645 uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
1646 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1647 struct i40e_fdir_flex_pit flex_pit;
1648 enum i40e_filter_pctype pctype;
1649 struct i40e_fdir_filter *node;
1650 unsigned char *pkt = NULL;
1651 bool cfg_flex_pit = true;
1652 bool wait_status = true;
1657 if (pf->fdir.fdir_vsi == NULL) {
1658 PMD_DRV_LOG(ERR, "FDIR is not enabled");
1662 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1663 PMD_DRV_LOG(ERR, "Invalid queue ID");
1666 if (filter->input.flow_ext.is_vf &&
1667 filter->input.flow_ext.dst_id >= pf->vf_num) {
1668 PMD_DRV_LOG(ERR, "Invalid VF ID");
1671 if (filter->input.flow_ext.pkt_template) {
1672 if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
1673 !filter->input.flow.raw_flow.packet) {
1674 PMD_DRV_LOG(ERR, "Invalid raw packet template"
1675 " flow filter parameters!");
1678 pctype = filter->input.flow.raw_flow.pctype;
1680 pctype = filter->input.pctype;
1683 /* Check if the filter exists in the SW list */
1684 memset(&check_filter, 0, sizeof(check_filter));
1685 i40e_fdir_filter_convert(filter, &check_filter);
1688 if (filter->input.flow_ext.is_flex_flow) {
1689 for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
1690 layer_idx = filter->input.flow_ext.layer_idx;
1691 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
1692 flex_pit = filter->input.flow_ext.flex_pit[field_idx];
1694 /* Store flex pit to SW */
1695 ret = i40e_flow_store_flex_pit(pf, &flex_pit,
1698 PMD_DRV_LOG(ERR, "Conflict with the"
1699 " first flexible rule.");
1701 } else if (ret > 0) {
1702 cfg_flex_pit = false;
1707 i40e_flow_set_fdir_flex_pit(pf, layer_idx,
1708 filter->input.flow_ext.raw_id);
1710 /* Store flex mask to SW */
1711 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++)
1713 filter->input.flow_ext.flex_mask[i];
1715 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
1717 PMD_DRV_LOG(ERR, "Exceed maximal"
1718 " number of bitmasks");
1720 } else if (ret == -2) {
1721 PMD_DRV_LOG(ERR, "Conflict with the"
1722 " first flexible rule");
1724 } else if (ret == 0) {
1725 i40e_flow_set_fdir_flex_msk(pf, pctype);
1729 ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
1732 "Conflict with existing flow director rules!");
1736 if (fdir_info->fdir_invalprio == 1 &&
1737 fdir_info->fdir_guarantee_free_space > 0)
1738 wait_status = false;
1740 if (filter->input.flow_ext.is_flex_flow)
1741 layer_idx = filter->input.flow_ext.layer_idx;
1743 node = i40e_sw_fdir_filter_lookup(fdir_info,
1744 &check_filter.fdir.input);
1747 "There's no corresponding flow firector filter!");
1751 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1754 "Error deleting fdir rule from hash table!");
1758 pf->fdir.flex_mask_flag[pctype] = 0;
1760 if (fdir_info->fdir_invalprio == 1)
1761 wait_status = false;
1764 /* find a buffer to store the pkt */
1765 pkt = i40e_find_available_buffer(dev);
1769 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1770 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1772 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1776 if (hw->mac.type == I40E_MAC_X722) {
1777 /* get translated pctype value in fd pctype register */
1778 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1779 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1782 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add,
1785 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1790 if (filter->input.flow_ext.is_flex_flow) {
1792 fdir_info->flex_flow_count[layer_idx]++;
1793 pf->fdir.flex_pit_flag[layer_idx] = 1;
1795 fdir_info->flex_flow_count[layer_idx]--;
1796 if (!fdir_info->flex_flow_count[layer_idx])
1797 pf->fdir.flex_pit_flag[layer_idx] = 0;
1802 fdir_info->fdir_actual_cnt++;
1803 if (fdir_info->fdir_invalprio == 1 &&
1804 fdir_info->fdir_guarantee_free_space > 0)
1805 fdir_info->fdir_guarantee_free_space--;
1807 fdir_info->fdir_actual_cnt--;
1808 if (fdir_info->fdir_invalprio == 1 &&
1809 fdir_info->fdir_guarantee_free_space <
1810 fdir_info->fdir_guarantee_total_space)
1811 fdir_info->fdir_guarantee_free_space++;
1819 i40e_sw_fdir_filter_del(pf, &check_filter.fdir.input);
1821 i40e_sw_fdir_filter_insert(pf, &check_filter);
1827 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1828 * This is done with a Flow Director programming descriptor followed by a
1829 * packet structure that contains the filter fields to match.
1830 * @pf: board private structure
1832 * @filter: fdir filter entry
1833 * @add: 0 - delete, 1 - add
1836 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1837 enum i40e_filter_pctype pctype,
1838 const struct i40e_fdir_filter_conf *filter,
1839 bool add, bool wait_status)
1841 struct i40e_tx_queue *txq = pf->fdir.txq;
1842 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1843 const struct i40e_fdir_action *fdir_action = &filter->action;
1844 volatile struct i40e_tx_desc *txdp;
1845 volatile struct i40e_filter_program_desc *fdirdp;
1851 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1852 fdirdp = (volatile struct i40e_filter_program_desc *)
1853 (&txq->tx_ring[txq->tx_tail]);
1855 fdirdp->qindex_flex_ptype_vsi =
1856 rte_cpu_to_le_32((fdir_action->rx_queue <<
1857 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1858 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1860 fdirdp->qindex_flex_ptype_vsi |=
1861 rte_cpu_to_le_32((fdir_action->flex_off <<
1862 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1863 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1865 fdirdp->qindex_flex_ptype_vsi |=
1866 rte_cpu_to_le_32((pctype <<
1867 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1868 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1870 if (filter->input.flow_ext.is_vf)
1871 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1873 /* Use LAN VSI Id by default */
1874 vsi_id = pf->main_vsi->vsi_id;
1875 fdirdp->qindex_flex_ptype_vsi |=
1876 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1877 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1878 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1880 fdirdp->dtype_cmd_cntindex =
1881 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1884 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1885 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1886 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1888 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1889 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1890 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1892 if (fdir_action->behavior == I40E_FDIR_REJECT)
1893 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1894 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1895 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1896 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1897 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1899 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1903 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1904 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1905 I40E_TXD_FLTR_QW1_DEST_MASK);
1907 fdirdp->dtype_cmd_cntindex |=
1908 rte_cpu_to_le_32((fdir_action->report_status <<
1909 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1910 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1912 fdirdp->dtype_cmd_cntindex |=
1913 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1914 fdirdp->dtype_cmd_cntindex |=
1916 ((uint32_t)pf->fdir.match_counter_index <<
1917 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1918 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1920 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1922 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1923 txdp = &txq->tx_ring[txq->tx_tail + 1];
1924 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr[txq->tx_tail >> 1]);
1926 td_cmd = I40E_TX_DESC_CMD_EOP |
1927 I40E_TX_DESC_CMD_RS |
1928 I40E_TX_DESC_CMD_DUMMY;
1930 txdp->cmd_type_offset_bsz =
1931 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1933 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1934 if (txq->tx_tail >= txq->nb_tx_desc)
1936 /* Update the tx tail register */
1939 /* fdir program rx queue cleanup */
1940 i40e_fdir_programming_status_cleanup(rxq);
1942 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1945 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1946 if ((txdp->cmd_type_offset_bsz &
1947 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1948 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1952 if (i >= I40E_FDIR_MAX_WAIT_US) {
1954 "Failed to program FDIR filter: time out to get DD on tx queue.");
1957 /* totally delay 10 ms to check programming status*/
1958 rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1959 if (i40e_check_fdir_programming_status(rxq) < 0) {
1961 "Failed to program FDIR filter: programming status reported.");
1970 * i40e_fdir_flush - clear all filters in the Flow Director table
1971 * @pf: board private structure
1974 i40e_fdir_flush(struct rte_eth_dev *dev)
1976 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1977 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1979 uint16_t guarant_cnt, best_cnt;
1982 I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1983 I40E_WRITE_FLUSH(hw);
1985 for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1986 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1987 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1988 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1991 if (i >= I40E_FDIR_FLUSH_RETRY) {
1992 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1995 guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1996 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1997 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1998 best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1999 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2000 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2001 if (guarant_cnt != 0 || best_cnt != 0) {
2002 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
2005 PMD_DRV_LOG(INFO, "FD table Flush success.");
2010 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
2011 struct rte_eth_flex_payload_cfg *flex_set,
2014 struct i40e_fdir_flex_pit *flex_pit;
2015 struct rte_eth_flex_payload_cfg *ptr = flex_set;
2016 uint16_t src, dst, size, j, k;
2017 uint8_t i, layer_idx;
2019 for (layer_idx = I40E_FLXPLD_L2_IDX;
2020 layer_idx <= I40E_FLXPLD_L4_IDX;
2022 if (layer_idx == I40E_FLXPLD_L2_IDX)
2023 ptr->type = RTE_ETH_L2_PAYLOAD;
2024 else if (layer_idx == I40E_FLXPLD_L3_IDX)
2025 ptr->type = RTE_ETH_L3_PAYLOAD;
2026 else if (layer_idx == I40E_FLXPLD_L4_IDX)
2027 ptr->type = RTE_ETH_L4_PAYLOAD;
2029 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
2030 flex_pit = &pf->fdir.flex_set[layer_idx *
2031 I40E_MAX_FLXPLD_FIED + i];
2032 if (flex_pit->size == 0)
2034 src = flex_pit->src_offset * sizeof(uint16_t);
2035 dst = flex_pit->dst_offset * sizeof(uint16_t);
2036 size = flex_pit->size * sizeof(uint16_t);
2037 for (j = src, k = dst; j < src + size; j++, k++)
2038 ptr->src_offset[k] = j;
2046 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
2047 struct rte_eth_fdir_flex_mask *flex_mask,
2050 struct i40e_fdir_flex_mask *mask;
2051 struct rte_eth_fdir_flex_mask *ptr = flex_mask;
2054 uint16_t off_bytes, mask_tmp;
2056 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2057 i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
2059 mask = &pf->fdir.flex_mask[i];
2060 flow_type = i40e_pctype_to_flowtype(pf->adapter,
2061 (enum i40e_filter_pctype)i);
2062 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
2065 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
2066 if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
2067 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
2068 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
2070 ptr->mask[j * sizeof(uint16_t)] = 0x0;
2071 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
2074 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
2075 off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
2076 mask_tmp = ~mask->bitmask[j].mask;
2077 ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
2078 ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
2080 ptr->flow_type = flow_type;
2087 * i40e_fdir_info_get - get information of Flow Director
2088 * @dev: ethernet device to get info from
2089 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
2090 * the flow director information.
2093 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
2095 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2096 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2097 uint16_t num_flex_set = 0;
2098 uint16_t num_flex_mask = 0;
2101 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
2102 fdir->mode = RTE_FDIR_MODE_PERFECT;
2104 fdir->mode = RTE_FDIR_MODE_NONE;
2107 (uint32_t)hw->func_caps.fd_filters_guaranteed;
2109 (uint32_t)hw->func_caps.fd_filters_best_effort;
2110 fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
2111 fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
2112 for (i = 1; i < RTE_FLOW_MASK_ARRAY_SIZE; i++)
2113 fdir->flow_types_mask[i] = 0ULL;
2114 fdir->flex_payload_unit = sizeof(uint16_t);
2115 fdir->flex_bitmask_unit = sizeof(uint16_t);
2116 fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
2117 fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
2118 fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2120 i40e_fdir_info_get_flex_set(pf,
2121 fdir->flex_conf.flex_set,
2123 i40e_fdir_info_get_flex_mask(pf,
2124 fdir->flex_conf.flex_mask,
2127 fdir->flex_conf.nb_payloads = num_flex_set;
2128 fdir->flex_conf.nb_flexmasks = num_flex_mask;
2132 * i40e_fdir_stat_get - get statistics of Flow Director
2133 * @dev: ethernet device to get info from
2134 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2135 * the flow director statistics.
2138 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2140 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2141 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2144 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2146 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2147 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2149 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2150 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2153 /* Restore flow director filters */
2155 i40e_fdir_filter_restore(struct i40e_pf *pf)
2157 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2158 struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2159 struct i40e_fdir_filter *f;
2160 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2162 uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
2163 uint32_t best_cnt; /**< Number of filters in best effort spaces. */
2165 TAILQ_FOREACH(f, fdir_list, rules)
2166 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2168 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2170 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2171 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2173 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2174 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2176 PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
2177 guarant_cnt, best_cnt);