4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <rte_ether.h>
43 #include <rte_ethdev.h>
45 #include <rte_memzone.h>
46 #include <rte_malloc.h>
53 #include "i40e_logs.h"
54 #include "base/i40e_type.h"
55 #include "base/i40e_prototype.h"
56 #include "i40e_ethdev.h"
57 #include "i40e_rxtx.h"
59 #define I40E_FDIR_MZ_NAME "FDIR_MEMZONE"
61 #define IPV6_ADDR_LEN 16
64 #define I40E_FDIR_PKT_LEN 512
65 #define I40E_FDIR_IP_DEFAULT_LEN 420
66 #define I40E_FDIR_IP_DEFAULT_TTL 0x40
67 #define I40E_FDIR_IP_DEFAULT_VERSION_IHL 0x45
68 #define I40E_FDIR_TCP_DEFAULT_DATAOFF 0x50
69 #define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW 0x60000000
71 #define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
72 #define I40E_FDIR_IPv6_PAYLOAD_LEN 380
73 #define I40E_FDIR_UDP_DEFAULT_LEN 400
74 #define I40E_FDIR_GTP_DEFAULT_LEN 384
75 #define I40E_FDIR_INNER_IP_DEFAULT_LEN 384
76 #define I40E_FDIR_INNER_IPV6_DEFAULT_LEN 344
78 #define I40E_FDIR_GTPC_DST_PORT 2123
79 #define I40E_FDIR_GTPU_DST_PORT 2152
80 #define I40E_FDIR_GTP_VER_FLAG_0X30 0x30
81 #define I40E_FDIR_GTP_VER_FLAG_0X32 0x32
82 #define I40E_FDIR_GTP_MSG_TYPE_0X01 0x01
83 #define I40E_FDIR_GTP_MSG_TYPE_0XFF 0xFF
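/*
 * Annotation (not in the original source): reading these constants against
 * the GTPv1 header layout (3GPP TS 29.060), flag byte 0x30 encodes
 * version 1 + protocol type GTP, while 0x32 additionally sets the S bit
 * (sequence number present); message type 0x01 is Echo Request and
 * 0xFF is G-PDU.
 */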
85 /* Wait time for fdir filter programming */
86 #define I40E_FDIR_MAX_WAIT_US 10000
88 /* Wait count and interval for fdir filter flush */
89 #define I40E_FDIR_FLUSH_RETRY 50
90 #define I40E_FDIR_FLUSH_INTERVAL_MS 5
92 #define I40E_COUNTER_PF 2
93 /* Statistics counter index for one PF */
94 #define I40E_COUNTER_INDEX_FDIR(pf_id) (0 + (pf_id) * I40E_COUNTER_PF)
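/*
 * Illustrative expansion (not in the original source): each PF owns
 * I40E_COUNTER_PF (2) statistic counters, so e.g. pf_id 3 yields
 * I40E_COUNTER_INDEX_FDIR(3) == 0 + 3 * 2 == 6.
 */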
96 #define I40E_FDIR_FLOWS ( \
97 (1 << RTE_ETH_FLOW_FRAG_IPV4) | \
98 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
99 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
100 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
101 (1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
102 (1 << RTE_ETH_FLOW_FRAG_IPV6) | \
103 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
104 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
105 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
106 (1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
107 (1 << RTE_ETH_FLOW_L2_PAYLOAD))
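/*
 * Illustrative check (not in the original source): a flow type is
 * FDIR-capable iff its bit is set in the mask above, e.g.
 *   (I40E_FDIR_FLOWS & (1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP)) != 0
 */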
109 static int i40e_fdir_filter_programming(struct i40e_pf *pf,
110 enum i40e_filter_pctype pctype,
111 const struct rte_eth_fdir_filter *filter,
113 static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
114 struct i40e_fdir_filter *filter);
115 static struct i40e_fdir_filter *
116 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
117 const struct i40e_fdir_input *input);
118 static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
119 struct i40e_fdir_filter *filter);
121 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
122 enum i40e_filter_pctype pctype,
123 const struct i40e_fdir_filter_conf *filter,
127 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
129 struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
130 struct i40e_hmc_obj_rxq rx_ctx;
131 int err = I40E_SUCCESS;
133 memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
134 /* Init the RX queue in hardware */
135 rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
137 rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
138 rx_ctx.qlen = rxq->nb_rx_desc;
139 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
142 rx_ctx.dtype = i40e_header_split_none;
143 rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
144 rx_ctx.rxmax = ETHER_MAX_LEN;
145 rx_ctx.tphrdesc_ena = 1;
146 rx_ctx.tphwdesc_ena = 1;
147 rx_ctx.tphdata_ena = 1;
148 rx_ctx.tphhead_ena = 1;
149 rx_ctx.lrxqthresh = 2;
155 err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
156 if (err != I40E_SUCCESS) {
157 PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
160 err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
161 if (err != I40E_SUCCESS) {
162 PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
165 rxq->qrx_tail = hw->hw_addr +
166 I40E_QRX_TAIL(rxq->vsi->base_queue);
169 /* Init the RX tail register. */
170 I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
171 I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
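/*
 * Annotation (assumption, not stated in the original source): writing 0 and
 * then nb_rx_desc - 1 to the tail register hands all but one descriptor to
 * hardware, the usual ring convention that keeps head and tail from
 * aliasing when the ring is full.
 */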
177 * i40e_fdir_setup - reserve and initialize the Flow Director resources
178 * @pf: board private structure
181 i40e_fdir_setup(struct i40e_pf *pf)
183 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
184 struct i40e_vsi *vsi;
185 int err = I40E_SUCCESS;
186 char z_name[RTE_MEMZONE_NAMESIZE];
187 const struct rte_memzone *mz = NULL;
188 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
190 if ((pf->flags & I40E_FLAG_FDIR) == 0) {
191 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
192 return I40E_NOT_SUPPORTED;
195 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
196 " num_filters_best_effort = %u.",
197 hw->func_caps.fd_filters_guaranteed,
198 hw->func_caps.fd_filters_best_effort);
200 vsi = pf->fdir.fdir_vsi;
202 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
205 /* make new FDIR VSI */
206 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
208 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
209 return I40E_ERR_NO_AVAILABLE_VSI;
211 pf->fdir.fdir_vsi = vsi;
213 /* Fdir tx queue setup */
214 err = i40e_fdir_setup_tx_resources(pf);
216 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
220 /* Fdir rx queue setup */
221 err = i40e_fdir_setup_rx_resources(pf);
223 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
227 err = i40e_tx_queue_init(pf->fdir.txq);
229 PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
233 /* need to switch on before dev start */
234 err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
236 PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
240 /* Init the rx queue in hardware */
241 err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
243 PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
247 /* switch on rx queue */
248 err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
250 PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
254 /* reserve memory for the fdir programming packet */
255 snprintf(z_name, sizeof(z_name), "%s_%s_%d",
256 eth_dev->device->driver->name,
258 eth_dev->data->port_id);
259 mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
261 PMD_DRV_LOG(ERR, "Cannot init memzone for "
262 "flow director program packet.");
263 err = I40E_ERR_NO_MEMORY;
266 pf->fdir.prg_pkt = mz->addr;
267 pf->fdir.dma_addr = mz->phys_addr;
269 pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
270 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
275 i40e_dev_rx_queue_release(pf->fdir.rxq);
278 i40e_dev_tx_queue_release(pf->fdir.txq);
281 i40e_vsi_release(vsi);
282 pf->fdir.fdir_vsi = NULL;
287 * i40e_fdir_teardown - release the Flow Director resources
288 * @pf: board private structure
291 i40e_fdir_teardown(struct i40e_pf *pf)
293 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
294 struct i40e_vsi *vsi;
296 vsi = pf->fdir.fdir_vsi;
299 int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
301 PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
302 err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
304 PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
305 i40e_dev_rx_queue_release(pf->fdir.rxq);
307 i40e_dev_tx_queue_release(pf->fdir.txq);
309 i40e_vsi_release(vsi);
310 pf->fdir.fdir_vsi = NULL;
313 /* check whether the flow director table is empty */
315 i40e_fdir_empty(struct i40e_hw *hw)
317 uint32_t guarant_cnt, best_cnt;
319 guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
320 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
321 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
322 best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
323 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
324 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
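/*
 * Annotation (not in the original source): I40E_PFQF_FDSTAT reports the
 * number of filters currently programmed in the guaranteed and best-effort
 * spaces; the table counts as empty only when both read zero.
 */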
325 if (best_cnt + guarant_cnt > 0)
332 * Initialize the configuration of the byte stream extracted as flexible payload
336 i40e_init_flx_pld(struct i40e_pf *pf)
338 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
344 * Define the byte stream extracted as flexible payload in the
345 * field vector. By default, select 8 words from the beginning
346 * of the payload as flexible payload.
348 for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
349 index = i * I40E_MAX_FLXPLD_FIED;
350 pf->fdir.flex_set[index].src_offset = 0;
351 pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
352 pf->fdir.flex_set[index].dst_offset = 0;
353 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
355 I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29); /* non-used */
357 I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A); /* non-used */
360 /* initialize the masks */
361 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
362 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
363 flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);
365 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
367 pf->fdir.flex_mask[pctype].word_mask = 0;
368 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
369 for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
370 pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
371 pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
372 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
377 #define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
378 if ((flex_pit2).src_offset < \
379 (flex_pit1).src_offset + (flex_pit1).size) { \
380 PMD_DRV_LOG(ERR, "src_offset should be not" \
381 " less than than previous offset" \
382 " + previous FSIZE."); \
388 * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
389 * and the flex_pit will be sorted by its src_offset value
391 static inline uint16_t
392 i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
393 struct i40e_fdir_flex_pit *flex_pit)
395 uint16_t src_tmp, size, num = 0;
396 uint16_t i, k, j = 0;
398 while (j < I40E_FDIR_MAX_FLEX_LEN) {
400 for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
401 if (src_offset[j + 1] == src_offset[j] + 1)
406 src_tmp = src_offset[j] + 1 - size;
407 /* the flex_pit entries need to be sorted by src_offset */
408 for (i = 0; i < num; i++) {
409 if (src_tmp < flex_pit[i].src_offset)
412 /* if insert required, move backward */
413 for (k = num; k > i; k--)
414 flex_pit[k] = flex_pit[k - 1];
416 flex_pit[i].dst_offset = j + 1 - size;
417 flex_pit[i].src_offset = src_tmp;
418 flex_pit[i].size = size;
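/*
 * Worked example (not in the original source): for src_offset[] beginning
 * {2, 3, 4, 29} (trailing entries ignored here for brevity) the loop above
 * emits two sorted entries:
 *   flex_pit[0] = { .src_offset = 2,  .size = 3, .dst_offset = 0 }
 *   flex_pit[1] = { .src_offset = 29, .size = 1, .dst_offset = 3 }
 * i.e. runs of consecutive source words are merged into one PIT entry and
 * dst_offset advances by the number of words already placed.
 */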
425 /* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
427 i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
429 struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
432 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
433 if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
434 PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
439 memset(flex_pit, 0, sizeof(flex_pit));
440 num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
441 if (num > I40E_MAX_FLXPLD_FIED) {
442 PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
445 for (i = 0; i < num; i++) {
446 if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
447 flex_pit[i].src_offset & 0x01) {
448 PMD_DRV_LOG(ERR, "flexpayload should be measured"
453 I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
459 * i40e_check_fdir_flex_conf - check whether the flex payload and mask
460 * configuration arguments are valid
463 i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
464 const struct rte_eth_fdir_flex_conf *conf)
466 const struct rte_eth_flex_payload_cfg *flex_cfg;
467 const struct rte_eth_fdir_flex_mask *flex_mask;
472 enum i40e_filter_pctype pctype;
475 PMD_DRV_LOG(INFO, "NULL pointer.");
478 /* check flexible payload setting configuration */
479 if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
480 PMD_DRV_LOG(ERR, "invalid number of payload setting.");
483 for (i = 0; i < conf->nb_payloads; i++) {
484 flex_cfg = &conf->flex_set[i];
485 if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
486 PMD_DRV_LOG(ERR, "invalid payload type.");
489 ret = i40e_check_fdir_flex_payload(flex_cfg);
491 PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
496 /* check flex mask setting configuration */
497 if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
498 PMD_DRV_LOG(ERR, "invalid number of flex masks.");
501 for (i = 0; i < conf->nb_flexmasks; i++) {
502 flex_mask = &conf->flex_mask[i];
503 pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
504 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
505 PMD_DRV_LOG(WARNING, "invalid flow type.");
509 for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
510 mask_tmp = I40E_WORD(flex_mask->mask[j],
511 flex_mask->mask[j + 1]);
512 if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
514 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
515 PMD_DRV_LOG(ERR, " exceed maximal"
516 " number of bitmasks.");
526 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as flexible payload
527 * @pf: board private structure
528 * @cfg: the rule describing how the byte stream is extracted as flexible payload
531 i40e_set_flx_pld_cfg(struct i40e_pf *pf,
532 const struct rte_eth_flex_payload_cfg *cfg)
534 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
535 struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
537 uint16_t num, min_next_off; /* in words */
538 uint8_t field_idx = 0;
539 uint8_t layer_idx = 0;
542 if (cfg->type == RTE_ETH_L2_PAYLOAD)
543 layer_idx = I40E_FLXPLD_L2_IDX;
544 else if (cfg->type == RTE_ETH_L3_PAYLOAD)
545 layer_idx = I40E_FLXPLD_L3_IDX;
546 else if (cfg->type == RTE_ETH_L4_PAYLOAD)
547 layer_idx = I40E_FLXPLD_L4_IDX;
549 memset(flex_pit, 0, sizeof(flex_pit));
550 num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
552 for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
553 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
554 /* record the info in fdir structure */
555 pf->fdir.flex_set[field_idx].src_offset =
556 flex_pit[i].src_offset / sizeof(uint16_t);
557 pf->fdir.flex_set[field_idx].size =
558 flex_pit[i].size / sizeof(uint16_t);
559 pf->fdir.flex_set[field_idx].dst_offset =
560 flex_pit[i].dst_offset / sizeof(uint16_t);
561 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
562 pf->fdir.flex_set[field_idx].size,
563 pf->fdir.flex_set[field_idx].dst_offset);
565 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
567 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
568 pf->fdir.flex_set[field_idx].size;
570 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
571 /* set the non-used registers, obeying the register's constraint */
572 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
573 NONUSE_FLX_PIT_DEST_OFF);
575 I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
582 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
583 * @pf: board private structure
584 * @pctype: packet classify type
585 * @flex_masks: mask for flexible payload
588 i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
589 enum i40e_filter_pctype pctype,
590 const struct rte_eth_fdir_flex_mask *mask_cfg)
592 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
593 struct i40e_fdir_flex_mask *flex_mask;
594 uint32_t flxinset, fd_mask;
596 uint8_t i, nb_bitmask = 0;
598 flex_mask = &pf->fdir.flex_mask[pctype];
599 memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
600 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
601 mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
602 if (mask_tmp != 0x0) {
603 flex_mask->word_mask |=
604 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
605 if (mask_tmp != UINT16_MAX) {
607 flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
608 flex_mask->bitmask[nb_bitmask].offset =
609 i / sizeof(uint16_t);
614 /* write mask to hw */
615 flxinset = (flex_mask->word_mask <<
616 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
617 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
618 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
620 for (i = 0; i < nb_bitmask; i++) {
621 fd_mask = (flex_mask->bitmask[i].mask <<
622 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
623 I40E_PRTQF_FD_MSK_MASK_MASK;
624 fd_mask |= ((flex_mask->bitmask[i].offset +
625 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
626 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
627 I40E_PRTQF_FD_MSK_OFFSET_MASK;
628 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
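/*
 * Worked example (not in the original source): a 16-bit mask of 0xF0FF on
 * flex word 1 sets bit 1 of word_mask and, being neither 0 nor 0xFFFF,
 * consumes one bitmask slot with .mask = ~0xF0FF = 0x0F00 and .offset = 1,
 * which is what the loop above writes into the I40E_PRTQF_FD_MSK register.
 */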
633 * Configure flow director related settings
636 i40e_fdir_configure(struct rte_eth_dev *dev)
638 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
639 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
640 struct rte_eth_fdir_flex_conf *conf;
641 enum i40e_filter_pctype pctype;
647 * Configuration needs to be done before
648 * flow director filters are added.
649 * If filters exist, flush them.
651 if (i40e_fdir_empty(hw) < 0) {
652 ret = i40e_fdir_flush(dev);
654 PMD_DRV_LOG(ERR, "failed to flush fdir table.");
659 /* enable FDIR filter */
660 val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
661 val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
662 i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
664 i40e_init_flx_pld(pf); /* set flex config to default value */
666 conf = &dev->data->dev_conf.fdir_conf.flex_conf;
667 ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
669 PMD_DRV_LOG(ERR, " invalid configuration arguments.");
672 /* configure flex payload */
673 for (i = 0; i < conf->nb_payloads; i++)
674 i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
675 /* configure flex mask */
676 for (i = 0; i < conf->nb_flexmasks; i++) {
677 if (hw->mac.type == I40E_MAC_X722) {
678 /* get translated pctype value in fd pctype register */
679 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
680 hw, I40E_GLQF_FD_PCTYPES(
681 (int)i40e_flowtype_to_pctype(pf->adapter,
682 conf->flex_mask[i].flow_type)));
684 pctype = i40e_flowtype_to_pctype(pf->adapter,
685 conf->flex_mask[i].flow_type);
687 i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
694 i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
695 unsigned char *raw_pkt,
698 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
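/* Annotation (not in the original source): 0x81 0x00 is the 802.1Q TPID in
 * network byte order; the two zero bytes are a TCI placeholder that is
 * overwritten with flow_ext.vlan_tci below.
 */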
699 uint16_t *ether_type;
700 uint8_t len = 2 * sizeof(struct ether_addr);
702 struct ipv6_hdr *ip6;
703 static const uint8_t next_proto[] = {
704 [RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
705 [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
706 [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
707 [RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
708 [RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
709 [RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
710 [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
711 [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
712 [RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
713 [RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
716 raw_pkt += 2 * sizeof(struct ether_addr);
717 if (vlan && fdir_input->flow_ext.vlan_tci) {
718 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
719 rte_memcpy(raw_pkt + sizeof(uint16_t),
720 &fdir_input->flow_ext.vlan_tci,
722 raw_pkt += sizeof(vlan_frame);
723 len += sizeof(vlan_frame);
725 ether_type = (uint16_t *)raw_pkt;
726 raw_pkt += sizeof(uint16_t);
727 len += sizeof(uint16_t);
729 switch (fdir_input->flow_type) {
730 case RTE_ETH_FLOW_L2_PAYLOAD:
731 *ether_type = fdir_input->flow.l2_flow.ether_type;
733 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
734 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
735 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
736 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
737 case RTE_ETH_FLOW_FRAG_IPV4:
738 ip = (struct ipv4_hdr *)raw_pkt;
740 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
741 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
742 /* set len to default value */
743 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
744 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
745 fdir_input->flow.ip4_flow.proto :
746 next_proto[fdir_input->flow_type];
747 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
748 fdir_input->flow.ip4_flow.ttl :
749 I40E_FDIR_IP_DEFAULT_TTL;
750 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
752 * The source and destination fields in the transmitted packet
753 * need to be presented in a reversed order with respect
754 * to the expected received packets.
756 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
757 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
758 len += sizeof(struct ipv4_hdr);
760 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
761 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
762 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
763 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
764 case RTE_ETH_FLOW_FRAG_IPV6:
765 ip6 = (struct ipv6_hdr *)raw_pkt;
767 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
769 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
770 (fdir_input->flow.ipv6_flow.tc <<
771 I40E_FDIR_IPv6_TC_OFFSET));
773 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
774 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
775 fdir_input->flow.ipv6_flow.proto :
776 next_proto[fdir_input->flow_type];
777 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
778 fdir_input->flow.ipv6_flow.hop_limits :
779 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
781 * The source and destination fields in the transmitted packet
782 * need to be presented in a reversed order with respect
783 * to the expected received packets.
785 rte_memcpy(&(ip6->src_addr),
786 &(fdir_input->flow.ipv6_flow.dst_ip),
788 rte_memcpy(&(ip6->dst_addr),
789 &(fdir_input->flow.ipv6_flow.src_ip),
791 len += sizeof(struct ipv6_hdr);
794 PMD_DRV_LOG(ERR, "unknown flow type %u.",
795 fdir_input->flow_type);
803 * i40e_fdir_construct_pkt - construct packet based on fields in input
804 * @pf: board private structure
805 * @fdir_input: input set of the flow director entry
806 * @raw_pkt: a packet to be constructed
809 i40e_fdir_construct_pkt(struct i40e_pf *pf,
810 const struct rte_eth_fdir_input *fdir_input,
811 unsigned char *raw_pkt)
813 unsigned char *payload, *ptr;
816 struct sctp_hdr *sctp;
817 uint8_t size, dst = 0;
818 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default */
821 /* fill the ethernet and IP head */
822 len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
823 !!fdir_input->flow_ext.vlan_tci);
827 /* fill the L4 head */
828 switch (fdir_input->flow_type) {
829 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
830 udp = (struct udp_hdr *)(raw_pkt + len);
831 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
833 * The source and destination fields in the transmitted packet
834 * need to be presented in a reversed order with respect
835 * to the expected received packets.
837 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
838 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
839 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
842 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
843 tcp = (struct tcp_hdr *)(raw_pkt + len);
844 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
846 * The source and destination fields in the transmitted packet
847 * need to be presented in a reversed order with respect
848 * to the expected received packets.
850 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
851 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
852 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
855 case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
856 sctp = (struct sctp_hdr *)(raw_pkt + len);
857 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
859 * The source and destination fields in the transmitted packet
860 * need to be presented in a reversed order with respect
861 * to the expected received packets.
863 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
864 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
865 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
868 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
869 case RTE_ETH_FLOW_FRAG_IPV4:
870 payload = raw_pkt + len;
871 set_idx = I40E_FLXPLD_L3_IDX;
874 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
875 udp = (struct udp_hdr *)(raw_pkt + len);
876 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
878 * The source and destination fields in the transmitted packet
879 * need to be presented in a reversed order with respect
880 * to the expected received packets.
882 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
883 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
884 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
887 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
888 tcp = (struct tcp_hdr *)(raw_pkt + len);
889 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
891 * The source and destination fields in the transmitted packet
892 * need to be presented in a reversed order with respect
893 * to the expected received packets.
895 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
896 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
897 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
900 case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
901 sctp = (struct sctp_hdr *)(raw_pkt + len);
902 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
904 * The source and destination fields in the transmitted packet
905 * need to be presented in a reversed order with respect
906 * to the expected received packets.
908 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
909 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
910 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
913 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
914 case RTE_ETH_FLOW_FRAG_IPV6:
915 payload = raw_pkt + len;
916 set_idx = I40E_FLXPLD_L3_IDX;
918 case RTE_ETH_FLOW_L2_PAYLOAD:
919 payload = raw_pkt + len;
921 * ARP packets are a special case in which the payload
922 * starts after the whole ARP header
924 if (fdir_input->flow.l2_flow.ether_type ==
925 rte_cpu_to_be_16(ETHER_TYPE_ARP))
926 payload += sizeof(struct arp_hdr);
927 set_idx = I40E_FLXPLD_L2_IDX;
930 PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
934 /* copy the flexbytes into the payload */
935 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
936 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
937 size = pf->fdir.flex_set[pit_idx].size;
940 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
942 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
944 &fdir_input->flow_ext.flexbytes[dst],
945 size * sizeof(uint16_t));
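/*
 * Worked example of the copy above (not in the original source): with
 * flex_set[pit_idx] = { .src_offset = 2, .size = 1, .dst_offset = 0 },
 * one 16-bit flex word is copied from flow_ext.flexbytes[0] to payload
 * byte offset 4, mirroring where the PIT registers place that word in the
 * field vector.
 */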
951 static struct i40e_customized_pctype *
952 i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
954 struct i40e_customized_pctype *cus_pctype;
955 enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;
957 for (; i < I40E_CUSTOMIZED_MAX; i++) {
958 cus_pctype = &pf->customized_pctype[i];
959 if (pctype == cus_pctype->pctype)
966 i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
967 const struct i40e_fdir_input *fdir_input,
968 unsigned char *raw_pkt,
971 struct i40e_customized_pctype *cus_pctype = NULL;
972 static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
973 uint16_t *ether_type;
974 uint8_t len = 2 * sizeof(struct ether_addr);
976 struct ipv6_hdr *ip6;
977 uint8_t pctype = fdir_input->pctype;
978 bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
979 static const uint8_t next_proto[] = {
980 [I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
981 [I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
982 [I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
983 [I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
984 [I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
985 [I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
986 [I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
987 [I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
988 [I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
989 [I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
992 raw_pkt += 2 * sizeof(struct ether_addr);
993 if (vlan && fdir_input->flow_ext.vlan_tci) {
994 rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
995 rte_memcpy(raw_pkt + sizeof(uint16_t),
996 &fdir_input->flow_ext.vlan_tci,
998 raw_pkt += sizeof(vlan_frame);
999 len += sizeof(vlan_frame);
1001 ether_type = (uint16_t *)raw_pkt;
1002 raw_pkt += sizeof(uint16_t);
1003 len += sizeof(uint16_t);
1005 if (is_customized_pctype) {
1006 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1008 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1009 fdir_input->pctype);
1014 if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
1015 *ether_type = fdir_input->flow.l2_flow.ether_type;
1016 else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
1017 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
1018 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
1019 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1020 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
1021 is_customized_pctype) {
1022 ip = (struct ipv4_hdr *)raw_pkt;
1024 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
1025 ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1026 /* set len to default value */
1027 ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
1028 ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
1029 fdir_input->flow.ip4_flow.ttl :
1030 I40E_FDIR_IP_DEFAULT_TTL;
1031 ip->type_of_service = fdir_input->flow.ip4_flow.tos;
1033 * The source and destination fields in the transmitted packet
1034 * need to be presented in a reversed order with respect
1035 * to the expected received packets.
1037 ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
1038 ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
1040 if (!is_customized_pctype)
1041 ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
1042 fdir_input->flow.ip4_flow.proto :
1043 next_proto[fdir_input->pctype];
1044 else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1045 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1046 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1047 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
1048 ip->next_proto_id = IPPROTO_UDP;
1049 len += sizeof(struct ipv4_hdr);
1050 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
1051 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
1052 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
1053 pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1054 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1055 ip6 = (struct ipv6_hdr *)raw_pkt;
1057 *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
1059 rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1060 (fdir_input->flow.ipv6_flow.tc <<
1061 I40E_FDIR_IPv6_TC_OFFSET));
1063 rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1064 ip6->proto = fdir_input->flow.ipv6_flow.proto ?
1065 fdir_input->flow.ipv6_flow.proto :
1066 next_proto[fdir_input->pctype];
1067 ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
1068 fdir_input->flow.ipv6_flow.hop_limits :
1069 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1071 * The source and destination fields in the transmitted packet
1072 * need to be presented in a reversed order with respect
1073 * to the expected received packets.
1075 rte_memcpy(&ip6->src_addr,
1076 &fdir_input->flow.ipv6_flow.dst_ip,
1078 rte_memcpy(&ip6->dst_addr,
1079 &fdir_input->flow.ipv6_flow.src_ip,
1081 len += sizeof(struct ipv6_hdr);
1083 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1084 fdir_input->pctype);
1092 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
1093 * @pf: board private structure
1094 * @fdir_input: input set of the flow director entry
1095 * @raw_pkt: a packet to be constructed
1098 i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
1099 const struct i40e_fdir_input *fdir_input,
1100 unsigned char *raw_pkt)
1102 unsigned char *payload = NULL;
1104 struct udp_hdr *udp;
1105 struct tcp_hdr *tcp;
1106 struct sctp_hdr *sctp;
1107 struct rte_flow_item_gtp *gtp;
1108 struct ipv4_hdr *gtp_ipv4;
1109 struct ipv6_hdr *gtp_ipv6;
1110 uint8_t size, dst = 0;
1111 uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default */
1113 uint8_t pctype = fdir_input->pctype;
1114 struct i40e_customized_pctype *cus_pctype;
1116 /* fill the ethernet and IP head */
1117 len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
1118 !!fdir_input->flow_ext.vlan_tci);
1122 /* fill the L4 head */
1123 if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
1124 udp = (struct udp_hdr *)(raw_pkt + len);
1125 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1127 * The source and destination fields in the transmitted packet
1128 * need to be presented in a reversed order with respect
1129 * to the expected received packets.
1131 udp->src_port = fdir_input->flow.udp4_flow.dst_port;
1132 udp->dst_port = fdir_input->flow.udp4_flow.src_port;
1133 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1134 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
1135 tcp = (struct tcp_hdr *)(raw_pkt + len);
1136 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1138 * The source and destination fields in the transmitted packet
1139 * need to be presented in a reversed order with respect
1140 * to the expected received packets.
1142 tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
1143 tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
1144 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1145 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
1146 sctp = (struct sctp_hdr *)(raw_pkt + len);
1147 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1149 * The source and destination fields in the transmitted packet
1150 * need to be presented in a reversed order with respect
1151 * to the expected received packets.
1153 sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
1154 sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
1155 sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
1156 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
1157 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
1158 payload = raw_pkt + len;
1159 set_idx = I40E_FLXPLD_L3_IDX;
1160 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
1161 udp = (struct udp_hdr *)(raw_pkt + len);
1162 payload = (unsigned char *)udp + sizeof(struct udp_hdr);
1164 * The source and destination fields in the transmitted packet
1165 * need to be presented in a reversed order with respect
1166 * to the expected received packets.
1168 udp->src_port = fdir_input->flow.udp6_flow.dst_port;
1169 udp->dst_port = fdir_input->flow.udp6_flow.src_port;
1170 udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
1171 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
1172 tcp = (struct tcp_hdr *)(raw_pkt + len);
1173 payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
1175 * The source and destination fields in the transmitted packet
1176 * need to be presented in a reversed order with respect
1177 * to the expected received packets.
1179 tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
1180 tcp->src_port = fdir_input->flow.udp6_flow.dst_port;
1181 tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
1182 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
1183 sctp = (struct sctp_hdr *)(raw_pkt + len);
1184 payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
1186 * The source and destination fields in the transmitted packet
1187 * need to be presented in a reversed order with respect
1188 * to the expected received packets.
1190 sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
1191 sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
1192 sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
1193 } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
1194 pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
1195 payload = raw_pkt + len;
1196 set_idx = I40E_FLXPLD_L3_IDX;
1197 } else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1198 payload = raw_pkt + len;
1200 * ARP packets are a special case in which the payload
1201 * starts after the whole ARP header
1203 if (fdir_input->flow.l2_flow.ether_type ==
1204 rte_cpu_to_be_16(ETHER_TYPE_ARP))
1205 payload += sizeof(struct arp_hdr);
1206 set_idx = I40E_FLXPLD_L2_IDX;
1207 } else if (fdir_input->flow_ext.customized_pctype) {
1208 /* If customized pctype is used */
1209 cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
1210 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
1211 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
1212 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
1213 cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
1214 udp = (struct udp_hdr *)(raw_pkt + len);
1216 rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
1218 gtp = (struct rte_flow_item_gtp *)
1219 ((unsigned char *)udp + sizeof(struct udp_hdr));
1221 rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
1222 gtp->teid = fdir_input->flow.gtp_flow.teid;
1223 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;
1225 /* GTP-C message type is not supported. */
1226 if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
1228 rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
1229 gtp->v_pt_rsv_flags =
1230 I40E_FDIR_GTP_VER_FLAG_0X32;
1233 rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
1234 gtp->v_pt_rsv_flags =
1235 I40E_FDIR_GTP_VER_FLAG_0X30;
1238 if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
1239 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1240 gtp_ipv4 = (struct ipv4_hdr *)
1241 ((unsigned char *)gtp +
1242 sizeof(struct rte_flow_item_gtp));
1243 gtp_ipv4->version_ihl =
1244 I40E_FDIR_IP_DEFAULT_VERSION_IHL;
1245 gtp_ipv4->next_proto_id = IPPROTO_IP;
1246 gtp_ipv4->total_length =
1248 I40E_FDIR_INNER_IP_DEFAULT_LEN);
1249 payload = (unsigned char *)gtp_ipv4 +
1250 sizeof(struct ipv4_hdr);
1251 } else if (cus_pctype->index ==
1252 I40E_CUSTOMIZED_GTPU_IPV6) {
1253 gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
1254 gtp_ipv6 = (struct ipv6_hdr *)
1255 ((unsigned char *)gtp +
1256 sizeof(struct rte_flow_item_gtp));
1257 gtp_ipv6->vtc_flow =
1259 I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
1260 (0 << I40E_FDIR_IPv6_TC_OFFSET));
1261 gtp_ipv6->proto = IPPROTO_NONE;
1262 gtp_ipv6->payload_len =
1264 I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
1265 gtp_ipv6->hop_limits =
1266 I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
1267 payload = (unsigned char *)gtp_ipv6 +
1268 sizeof(struct ipv6_hdr);
1270 payload = (unsigned char *)gtp +
1271 sizeof(struct rte_flow_item_gtp);
1274 PMD_DRV_LOG(ERR, "unknown pctype %u.",
1275 fdir_input->pctype);
1279 /* copy the flexbytes into the payload */
1280 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1281 pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
1282 size = pf->fdir.flex_set[pit_idx].size;
1285 dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
1287 pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
1288 (void)rte_memcpy(ptr,
1289 &fdir_input->flow_ext.flexbytes[dst],
1290 size * sizeof(uint16_t));
1296 /* Construct the tx flags */
1297 static inline uint64_t
1298 i40e_build_ctob(uint32_t td_cmd,
1303 return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
1304 ((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
1305 ((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
1306 ((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
1307 ((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
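/*
 * Example use (not in the original source):
 *   i40e_build_ctob(I40E_TX_DESC_CMD_EOP, 0, I40E_FDIR_PKT_LEN, 0)
 * yields a little-endian quadword with DTYPE = DATA, the EOP command bit,
 * zero offset/tag and a 512-byte buffer size, matching how the FDIR
 * programming paths below build their dummy data descriptors.
 */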
1311 * Check the programming status descriptor in the rx queue,
1312 * done after a Flow Director filter has been programmed on the tx queue.
1316 i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
1318 volatile union i40e_rx_desc *rxdp;
1325 rxdp = &rxq->rx_ring[rxq->rx_tail];
1326 qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
1327 rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
1328 >> I40E_RXD_QW1_STATUS_SHIFT;
1330 if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
1331 len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
1332 id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
1333 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
1335 if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
1336 id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
1338 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
1339 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
1340 if (error == (0x1 <<
1341 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
1342 PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
1343 " (FD_ID %u): programming status"
1345 rxdp->wb.qword0.hi_dword.fd_id);
1347 } else if (error == (0x1 <<
1348 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
1349 PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
1350 " (FD_ID %u): programming status"
1352 rxdp->wb.qword0.hi_dword.fd_id);
1355 PMD_DRV_LOG(ERR, "invalid programming status"
1356 " reported, error = %u.", error);
1358 PMD_DRV_LOG(ERR, "unknown programming status"
1359 " reported, len = %d, id = %u.", len, id);
1360 rxdp->wb.qword1.status_error_len = 0;
1362 if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
1369 i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
1370 struct i40e_fdir_filter *filter)
1372 rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
1376 /* Check if the flow director filter exists */
1377 static struct i40e_fdir_filter *
1378 i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
1379 const struct i40e_fdir_input *input)
1383 ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
1387 return fdir_info->hash_map[ret];
1390 /* Add a flow director filter into the SW list */
1392 i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
1394 struct i40e_fdir_info *fdir_info = &pf->fdir;
1397 ret = rte_hash_add_key(fdir_info->hash_table,
1398 &filter->fdir.input);
1401 "Failed to insert fdir filter to hash table %d!",
1405 fdir_info->hash_map[ret] = filter;
1407 TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
1412 /* Delete a flow director filter from the SW list */
1414 i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
1416 struct i40e_fdir_info *fdir_info = &pf->fdir;
1417 struct i40e_fdir_filter *filter;
1420 ret = rte_hash_del_key(fdir_info->hash_table, input);
1423 "Failed to delete fdir filter to hash table %d!",
1427 filter = fdir_info->hash_map[ret];
1428 fdir_info->hash_map[ret] = NULL;
1430 TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
1437 * i40e_add_del_fdir_filter - add or remove a flow director filter.
1438 * @pf: board private structure
1439 * @filter: fdir filter entry
1440 * @add: 0 - delete, 1 - add
1443 i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
1444 const struct rte_eth_fdir_filter *filter,
1447 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1448 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1449 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1450 enum i40e_filter_pctype pctype;
1453 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1454 PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
1455 " check the mode in fdir_conf.");
1459 pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
1460 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
1461 PMD_DRV_LOG(ERR, "invalid flow_type input.");
1464 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1465 PMD_DRV_LOG(ERR, "Invalid queue ID");
1468 if (filter->input.flow_ext.is_vf &&
1469 filter->input.flow_ext.dst_id >= pf->vf_num) {
1470 PMD_DRV_LOG(ERR, "Invalid VF ID");
1474 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1476 ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
1478 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1482 if (hw->mac.type == I40E_MAC_X722) {
1483 /* get translated pctype value in fd pctype register */
1484 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1485 hw, I40E_GLQF_FD_PCTYPES((int)pctype));
1488 ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
1490 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1499 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
1500 * @pf: board private structure
1501 * @filter: fdir filter entry
1502 * @add: 0 - delete, 1 - add
1505 i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
1506 const struct i40e_fdir_filter_conf *filter,
1509 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1510 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1511 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1512 enum i40e_filter_pctype pctype;
1513 struct i40e_fdir_info *fdir_info = &pf->fdir;
1514 struct i40e_fdir_filter *fdir_filter, *node;
1515 struct i40e_fdir_filter check_filter; /* Check if the filter exists */
1518 if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
1519 PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
1523 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1524 PMD_DRV_LOG(ERR, "Invalid queue ID");
1527 if (filter->input.flow_ext.is_vf &&
1528 filter->input.flow_ext.dst_id >= pf->vf_num) {
1529 PMD_DRV_LOG(ERR, "Invalid VF ID");
1533 /* Check if there is the filter in SW list */
1534 memset(&check_filter, 0, sizeof(check_filter));
1535 i40e_fdir_filter_convert(filter, &check_filter);
1536 node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
1539 "Conflict with existing flow director rules!");
1543 if (!add && !node) {
1545 "There's no corresponding flow firector filter!");
1549 memset(pkt, 0, I40E_FDIR_PKT_LEN);
1551 ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
1553 PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
1557 if (hw->mac.type == I40E_MAC_X722) {
1558 /* get translated pctype value in fd pctype register */
1559 pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
1560 hw, I40E_GLQF_FD_PCTYPES(
1561 (int)filter->input.pctype));
1563 pctype = filter->input.pctype;
1565 ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
1567 PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
1573 fdir_filter = rte_zmalloc("fdir_filter",
1574 sizeof(*fdir_filter), 0);
1575 rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
1576 ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
1578 ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
1585 * i40e_fdir_filter_programming - Program a flow director filter rule.
1586 * This is done via a Flow Director Programming Descriptor followed by a
1587 * packet structure that contains the filter fields to match.
1588 * @pf: board private structure
1590 * @filter: fdir filter entry
1591 * @add: 0 - delete, 1 - add
1594 i40e_fdir_filter_programming(struct i40e_pf *pf,
1595 enum i40e_filter_pctype pctype,
1596 const struct rte_eth_fdir_filter *filter,
1599 struct i40e_tx_queue *txq = pf->fdir.txq;
1600 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1601 const struct rte_eth_fdir_action *fdir_action = &filter->action;
1602 volatile struct i40e_tx_desc *txdp;
1603 volatile struct i40e_filter_program_desc *fdirdp;
1608 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1609 fdirdp = (volatile struct i40e_filter_program_desc *)
1610 (&(txq->tx_ring[txq->tx_tail]));
1612 fdirdp->qindex_flex_ptype_vsi =
1613 rte_cpu_to_le_32((fdir_action->rx_queue <<
1614 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1615 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1617 fdirdp->qindex_flex_ptype_vsi |=
1618 rte_cpu_to_le_32((fdir_action->flex_off <<
1619 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1620 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1622 fdirdp->qindex_flex_ptype_vsi |=
1623 rte_cpu_to_le_32((pctype <<
1624 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1625 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1627 if (filter->input.flow_ext.is_vf)
1628 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1630 /* Use LAN VSI Id by default */
1631 vsi_id = pf->main_vsi->vsi_id;
1632 fdirdp->qindex_flex_ptype_vsi |=
1633 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1634 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1635 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1637 fdirdp->dtype_cmd_cntindex =
1638 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1641 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1642 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1643 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1645 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1646 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1647 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1649 if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
1650 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1651 else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
1652 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1653 else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
1654 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1656 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1657 " unsupported fdir behavior.");
1661 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1662 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1663 I40E_TXD_FLTR_QW1_DEST_MASK);
1665 fdirdp->dtype_cmd_cntindex |=
1666 rte_cpu_to_le_32((fdir_action->report_status <<
1667 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1668 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1670 fdirdp->dtype_cmd_cntindex |=
1671 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1672 fdirdp->dtype_cmd_cntindex |=
1674 ((uint32_t)pf->fdir.match_counter_index <<
1675 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1676 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1678 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1680 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1681 txdp = &(txq->tx_ring[txq->tx_tail + 1]);
1682 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1683 td_cmd = I40E_TX_DESC_CMD_EOP |
1684 I40E_TX_DESC_CMD_RS |
1685 I40E_TX_DESC_CMD_DUMMY;
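/*
 * Annotation (assumption inferred from the flag naming, not stated in the
 * original source): I40E_TX_DESC_CMD_DUMMY marks this as an internal dummy
 * send, so the programming packet is consumed by the filter logic rather
 * than transmitted on the wire.
 */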
1687 txdp->cmd_type_offset_bsz =
1688 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1690 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1691 if (txq->tx_tail >= txq->nb_tx_desc)
1693 /* Update the tx tail register */
1695 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1696 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1697 if ((txdp->cmd_type_offset_bsz &
1698 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1699 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1703 if (i >= I40E_FDIR_MAX_WAIT_US) {
1704 PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
1705 " time out to get DD on tx queue.");
1708 /* delay a total of 10 ms before checking the programming status */
1709 for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
1710 if (i40e_check_fdir_programming_status(rxq) >= 0)
1715 "Failed to program FDIR filter: programming status reported.");
1720 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
1721 * This is done via a Flow Director Programming Descriptor followed by a
1722 * packet structure that contains the filter fields to match.
1723 * @pf: board private structure
1725 * @filter: fdir filter entry
1726 * @add: 0 - delete, 1 - add
1729 i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
1730 enum i40e_filter_pctype pctype,
1731 const struct i40e_fdir_filter_conf *filter,
1734 struct i40e_tx_queue *txq = pf->fdir.txq;
1735 struct i40e_rx_queue *rxq = pf->fdir.rxq;
1736 const struct i40e_fdir_action *fdir_action = &filter->action;
1737 volatile struct i40e_tx_desc *txdp;
1738 volatile struct i40e_filter_program_desc *fdirdp;
1743 PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
1744 fdirdp = (volatile struct i40e_filter_program_desc *)
1745 (&txq->tx_ring[txq->tx_tail]);
1747 fdirdp->qindex_flex_ptype_vsi =
1748 rte_cpu_to_le_32((fdir_action->rx_queue <<
1749 I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
1750 I40E_TXD_FLTR_QW0_QINDEX_MASK);
1752 fdirdp->qindex_flex_ptype_vsi |=
1753 rte_cpu_to_le_32((fdir_action->flex_off <<
1754 I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
1755 I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
1757 fdirdp->qindex_flex_ptype_vsi |=
1758 rte_cpu_to_le_32((pctype <<
1759 I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
1760 I40E_TXD_FLTR_QW0_PCTYPE_MASK);
1762 if (filter->input.flow_ext.is_vf)
1763 vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
1765 /* Use LAN VSI Id by default */
1766 vsi_id = pf->main_vsi->vsi_id;
1767 fdirdp->qindex_flex_ptype_vsi |=
1768 rte_cpu_to_le_32(((uint32_t)vsi_id <<
1769 I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
1770 I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
1772 fdirdp->dtype_cmd_cntindex =
1773 rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);
1776 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1777 I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
1778 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1780 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
1781 I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
1782 I40E_TXD_FLTR_QW1_PCMD_SHIFT);
1784 if (fdir_action->behavior == I40E_FDIR_REJECT)
1785 dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
1786 else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
1787 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1788 else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
1789 dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
1791 PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
1795 fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
1796 I40E_TXD_FLTR_QW1_DEST_SHIFT) &
1797 I40E_TXD_FLTR_QW1_DEST_MASK);
1799 fdirdp->dtype_cmd_cntindex |=
1800 rte_cpu_to_le_32((fdir_action->report_status <<
1801 I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
1802 I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
1804 fdirdp->dtype_cmd_cntindex |=
1805 rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
1806 fdirdp->dtype_cmd_cntindex |=
1808 ((uint32_t)pf->fdir.match_counter_index <<
1809 I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
1810 I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
1812 fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);
1814 PMD_DRV_LOG(INFO, "filling transmit descriptor.");
1815 txdp = &txq->tx_ring[txq->tx_tail + 1];
1816 txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
1817 td_cmd = I40E_TX_DESC_CMD_EOP |
1818 I40E_TX_DESC_CMD_RS |
1819 I40E_TX_DESC_CMD_DUMMY;
1821 txdp->cmd_type_offset_bsz =
1822 i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);
1824 txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
1825 if (txq->tx_tail >= txq->nb_tx_desc)
1827 /* Update the tx tail register */
1829 I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
1830 for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
1831 if ((txdp->cmd_type_offset_bsz &
1832 rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
1833 rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
1837 if (i >= I40E_FDIR_MAX_WAIT_US) {
1839 "Failed to program FDIR filter: time out to get DD on tx queue.");
1842 /* delay a total of 10 ms before checking the programming status */
1843 rte_delay_us(I40E_FDIR_MAX_WAIT_US);
1844 if (i40e_check_fdir_programming_status(rxq) < 0) {
1846 "Failed to program FDIR filter: programming status reported.");
1854 * i40e_fdir_flush - clear all filters in the Flow Director table
1855 * @pf: board private structure
1858 i40e_fdir_flush(struct rte_eth_dev *dev)
1860 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1861 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1863 uint16_t guarant_cnt, best_cnt;
1866 I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
1867 I40E_WRITE_FLUSH(hw);
1869 for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
1870 rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
1871 reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
1872 if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
1875 if (i >= I40E_FDIR_FLUSH_RETRY) {
1876 PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
1879 guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1880 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
1881 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
1882 best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
1883 I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
1884 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
1885 if (guarant_cnt != 0 || best_cnt != 0) {
1886 PMD_DRV_LOG(ERR, "Failed to flush FD table.");
1889 PMD_DRV_LOG(INFO, "FD table Flush success.");
1894 i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
1895 struct rte_eth_flex_payload_cfg *flex_set,
1898 struct i40e_fdir_flex_pit *flex_pit;
1899 struct rte_eth_flex_payload_cfg *ptr = flex_set;
1900 uint16_t src, dst, size, j, k;
1901 uint8_t i, layer_idx;
1903 for (layer_idx = I40E_FLXPLD_L2_IDX;
1904 layer_idx <= I40E_FLXPLD_L4_IDX;
1906 if (layer_idx == I40E_FLXPLD_L2_IDX)
1907 ptr->type = RTE_ETH_L2_PAYLOAD;
1908 else if (layer_idx == I40E_FLXPLD_L3_IDX)
1909 ptr->type = RTE_ETH_L3_PAYLOAD;
1910 else if (layer_idx == I40E_FLXPLD_L4_IDX)
1911 ptr->type = RTE_ETH_L4_PAYLOAD;
1913 for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
1914 flex_pit = &pf->fdir.flex_set[layer_idx *
1915 I40E_MAX_FLXPLD_FIED + i];
1916 if (flex_pit->size == 0)
1918 src = flex_pit->src_offset * sizeof(uint16_t);
1919 dst = flex_pit->dst_offset * sizeof(uint16_t);
1920 size = flex_pit->size * sizeof(uint16_t);
1921 for (j = src, k = dst; j < src + size; j++, k++)
1922 ptr->src_offset[k] = j;
1930 i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
1931 struct rte_eth_fdir_flex_mask *flex_mask,
1934 struct i40e_fdir_flex_mask *mask;
1935 struct rte_eth_fdir_flex_mask *ptr = flex_mask;
1938 uint16_t off_bytes, mask_tmp;
1940 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
1941 i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
1943 mask = &pf->fdir.flex_mask[i];
1944 flow_type = i40e_pctype_to_flowtype(pf->adapter,
1945 (enum i40e_filter_pctype)i);
1946 if (flow_type == RTE_ETH_FLOW_UNKNOWN)
1949 for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
1950 if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
1951 ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
1952 ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
1954 ptr->mask[j * sizeof(uint16_t)] = 0x0;
1955 ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
1958 for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
1959 off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
1960 mask_tmp = ~mask->bitmask[j].mask;
1961 ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
1962 ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
1964 ptr->flow_type = flow_type;
1971 * i40e_fdir_info_get - get information of Flow Director
1972 * @pf: ethernet device to get info from
1973 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
1974 * the flow director information.
1977 i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
1979 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1980 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
1981 uint16_t num_flex_set = 0;
1982 uint16_t num_flex_mask = 0;
1984 if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
1985 fdir->mode = RTE_FDIR_MODE_PERFECT;
1987 fdir->mode = RTE_FDIR_MODE_NONE;
1990 (uint32_t)hw->func_caps.fd_filters_guaranteed;
1992 (uint32_t)hw->func_caps.fd_filters_best_effort;
1993 fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
1994 fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
1995 fdir->flex_payload_unit = sizeof(uint16_t);
1996 fdir->flex_bitmask_unit = sizeof(uint16_t);
1997 fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
1998 fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
1999 fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
2001 i40e_fdir_info_get_flex_set(pf,
2002 fdir->flex_conf.flex_set,
2004 i40e_fdir_info_get_flex_mask(pf,
2005 fdir->flex_conf.flex_mask,
2008 fdir->flex_conf.nb_payloads = num_flex_set;
2009 fdir->flex_conf.nb_flexmasks = num_flex_mask;
2013 * i40e_fdir_stat_get - get statistics of Flow Director
2014 * @pf: ethernet device to get info from
2015 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
2016 * the flow director statistics.
2019 i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
2021 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2022 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2025 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2027 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2028 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2030 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2031 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2035 i40e_fdir_filter_set(struct rte_eth_dev *dev,
2036 struct rte_eth_fdir_filter_info *info)
2038 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2042 PMD_DRV_LOG(ERR, "Invalid pointer");
2046 switch (info->info_type) {
2047 case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
2048 ret = i40e_fdir_filter_inset_select(pf,
2049 &(info->info.input_set_conf));
2052 PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
2061 * i40e_fdir_ctrl_func - deal with all operations on flow director.
2062 * @pf: board private structure
2063 * @filter_op: operation to be taken.
2064 * @arg: a pointer to specific structure corresponding to the filter_op
2067 i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
2068 enum rte_filter_op filter_op,
2071 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2074 if ((pf->flags & I40E_FLAG_FDIR) == 0)
2077 if (filter_op == RTE_ETH_FILTER_NOP)
2080 if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
2083 switch (filter_op) {
2084 case RTE_ETH_FILTER_ADD:
2085 ret = i40e_add_del_fdir_filter(dev,
2086 (struct rte_eth_fdir_filter *)arg,
2089 case RTE_ETH_FILTER_DELETE:
2090 ret = i40e_add_del_fdir_filter(dev,
2091 (struct rte_eth_fdir_filter *)arg,
2094 case RTE_ETH_FILTER_FLUSH:
2095 ret = i40e_fdir_flush(dev);
2097 case RTE_ETH_FILTER_INFO:
2098 i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
2100 case RTE_ETH_FILTER_SET:
2101 ret = i40e_fdir_filter_set(dev,
2102 (struct rte_eth_fdir_filter_info *)arg);
2104 case RTE_ETH_FILTER_STATS:
2105 i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
2108 PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
2115 /* Restore flow director filters */
2117 i40e_fdir_filter_restore(struct i40e_pf *pf)
2119 struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
2120 struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
2121 struct i40e_fdir_filter *f;
2122 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2124 uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
2125 uint32_t best_cnt; /**< Number of filters in best effort spaces. */
2127 TAILQ_FOREACH(f, fdir_list, rules)
2128 i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);
2130 fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
2132 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
2133 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
2135 (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
2136 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
2138 PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
2139 guarant_cnt, best_cnt);