/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_cycles.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"

#define IPV6_ADDR_LEN              16

#define I40E_FDIR_PKT_LEN                 512
#define I40E_FDIR_IP_DEFAULT_LEN          420
#define I40E_FDIR_IP_DEFAULT_TTL          0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL  0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF     0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW   0x60000000
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS 0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN        380
#define I40E_FDIR_UDP_DEFAULT_LEN         400
#define I40E_FDIR_GTP_DEFAULT_LEN         384
#define I40E_FDIR_INNER_IP_DEFAULT_LEN    384
#define I40E_FDIR_INNER_IPV6_DEFAULT_LEN  344

#define I40E_FDIR_GTPC_DST_PORT           2123
#define I40E_FDIR_GTPU_DST_PORT           2152
#define I40E_FDIR_GTP_VER_FLAG_0X30       0x30
#define I40E_FDIR_GTP_VER_FLAG_0X32       0x32
#define I40E_FDIR_GTP_MSG_TYPE_0X01       0x01
#define I40E_FDIR_GTP_MSG_TYPE_0XFF       0xFF
/* Wait time for fdir filter programming */
#define I40E_FDIR_MAX_WAIT_US 10000

/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY       50
#define I40E_FDIR_FLUSH_INTERVAL_MS 5

/* Number of statistic counters reserved for one pf */
#define I40E_COUNTER_PF           2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
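
/*
 * Worked example (illustrative): each PF reserves I40E_COUNTER_PF
 * consecutive statistic counters, so with I40E_COUNTER_PF == 2 the FDIR
 * match counter of PF 3 is I40E_COUNTER_INDEX_FDIR(3) = 0 + 3 * 2 = 6.
 */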
#define I40E_FDIR_FLOWS ( \
	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
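
/*
 * Illustrative sketch (not part of the upstream driver): the bitmap above
 * can be probed per flow type, e.g. when validating user input before a
 * filter is programmed. The "example_" name is hypothetical.
 */
static inline int
example_fdir_flow_supported(uint16_t flow_type)
{
	if (flow_type >= RTE_ETH_FLOW_MAX)
		return 0;
	/* Each supported flow type owns one bit in I40E_FDIR_FLOWS. */
	return !!(I40E_FDIR_FLOWS & (1 << flow_type));
}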
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add);
static int i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
			 struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
			const struct i40e_fdir_input *input);
static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
				   struct i40e_fdir_filter *filter);
static int
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* Fdir tx queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* Fdir rx queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->device->driver->name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				"flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->phys_addr;

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
/*
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
void
i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;
	err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
	if (err)
		PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
	err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
	if (err)
		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
}
/* check whether the flow director table is empty */
static inline int
i40e_fdir_empty(struct i40e_hw *hw)
{
	uint32_t guarant_cnt, best_cnt;

	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (best_cnt + guarant_cnt > 0)
		return -1;

	return 0;
}
/*
 * Initialize the configuration about bytes stream extracted as flexible
 * payload and mask setting
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;
	uint16_t flow_type;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);

		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should not be less" \
			" than the previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
 * and the flex_pit will be sorted by its src_offset value
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
		       struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t src_tmp, size, num = 0;
	uint16_t i, k, j = 0;

	while (j < I40E_FDIR_MAX_FLEX_LEN) {
		size = 1;
		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
			if (src_offset[j + 1] == src_offset[j] + 1)
				size++;
			else
				break;
		}
		src_tmp = src_offset[j] + 1 - size;
		/* the flex_pit need to be sorted by src_offset */
		for (i = 0; i < num; i++) {
			if (src_tmp < flex_pit[i].src_offset)
				break;
		}
		/* if insert required, move backward */
		for (k = num; k > i; k--)
			flex_pit[k] = flex_pit[k - 1];
		/* insert it */
		flex_pit[i].dst_offset = j + 1 - size;
		flex_pit[i].src_offset = src_tmp;
		flex_pit[i].size = size;
		j++;
		num++;
	}

	return num;
}
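
/*
 * Worked example (illustrative, hypothetical offsets): for a src_offset
 * array beginning {4, 5, 6, 29, ...}, bytes 4..6 are consecutive and
 * collapse into a single field, so the routine emits entries already
 * sorted by source offset:
 *   flex_pit[0] = { .src_offset = 4,  .size = 3, .dst_offset = 0 }
 *   flex_pit[1] = { .src_offset = 29, .size = 1, .dst_offset = 3 }
 */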
/* i40e_check_fdir_flex_payload -check flex payload configuration arguments */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t num, i;

	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
			return -EINVAL;
		}
	}

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
	if (num > I40E_MAX_FLXPLD_FIED) {
		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
		return -EINVAL;
	}
	for (i = 0; i < num; i++) {
		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
			flex_pit[i].src_offset & 0x01) {
			PMD_DRV_LOG(ERR, "flexpayload should be measured"
				" in word");
			return -EINVAL;
		}
		if (i != num - 1)
			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
	}

	return 0;
}
/*
 * i40e_check_fdir_flex_conf -check if the flex payload and mask configuration
 * arguments are valid
 */
static inline int
i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
			  const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;
	enum i40e_filter_pctype pctype;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
					PMD_DRV_LOG(ERR, "exceed maximal"
						" number of bitmasks.");
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
/*
 * i40e_set_flx_pld_cfg -configure the rule how bytes stream is extracted as flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
		     const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);

	for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			    NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
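
/*
 * Worked example (illustrative): FLX_PIT fields are programmed in 2-byte
 * words, which is why the byte values from flex_pit[] are divided by
 * sizeof(uint16_t) above. A field covering packet bytes 12..15
 * (src_offset 12, size 4 in bytes) is therefore written as
 * MK_FLX_PIT(6, 2, dst) for a hypothetical destination offset dst.
 */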
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
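
/*
 * Worked example (illustrative, hypothetical mask): for a byte mask
 * beginning {0xff, 0xff, 0x00, 0xf0, ...}, word 0 (0xffff) and word 1
 * (0x00f0) are both set in word_mask; word 0 is fully compared and
 * consumes no bitmask slot, while the partial word 1 consumes one
 * FD_MSK slot with offset = 1 and mask = (uint16_t)~0x00f0.
 */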
/*
 * Configure flow director related setting
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * configuration need to be done before
	 * flow director filters are added
	 * If filters exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask*/
	for (i = 0; i < conf->nb_flexmasks; i++) {
		if (hw->mac.type == I40E_MAC_X722) {
			/* get translated pctype value in fd pctype register */
			pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
				hw, I40E_GLQF_FD_PCTYPES(
				(int)i40e_flowtype_to_pctype(pf->adapter,
					conf->flex_mask[i].flow_type)));
		} else {
			pctype = i40e_flowtype_to_pctype(pf->adapter,
						conf->flex_mask[i].flow_type);
		}
		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
static inline int
i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
			   unsigned char *raw_pkt,
			   bool vlan)
{
	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
	uint16_t *ether_type;
	uint8_t len = 2 * sizeof(struct ether_addr);
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
		[RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
	};

	raw_pkt += 2 * sizeof(struct ether_addr);
	if (vlan && fdir_input->flow_ext.vlan_tci) {
		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
		rte_memcpy(raw_pkt + sizeof(uint16_t),
			   &fdir_input->flow_ext.vlan_tci,
			   sizeof(uint16_t));
		raw_pkt += sizeof(vlan_frame);
		len += sizeof(vlan_frame);
	}
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_L2_PAYLOAD:
		*ether_type = fdir_input->flow.l2_flow.ether_type;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		ip = (struct ipv4_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
		/* set len to default */
		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
			fdir_input->flow.ip4_flow.proto :
			next_proto[fdir_input->flow_type];
		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
			fdir_input->flow.ip4_flow.ttl :
			I40E_FDIR_IP_DEFAULT_TTL;
		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
		len += sizeof(struct ipv4_hdr);
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		ip6 = (struct ipv6_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->vtc_flow =
			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
					 (fdir_input->flow.ipv6_flow.tc <<
					  I40E_FDIR_IPv6_TC_OFFSET));
		ip6->payload_len =
			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
			fdir_input->flow.ipv6_flow.proto :
			next_proto[fdir_input->flow_type];
		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
			fdir_input->flow.ipv6_flow.hop_limits :
			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		rte_memcpy(&ip6->src_addr,
			   &fdir_input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr,
			   &fdir_input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.",
			    fdir_input->flow_type);
		return -1;
	}

	return len;
}
/*
 * i40e_fdir_construct_pkt - construct packet based on fields in input
 * @pf: board private structure
 * @fdir_input: input set of the flow director entry
 * @raw_pkt: a packet to be constructed
 */
static int
i40e_fdir_construct_pkt(struct i40e_pf *pf,
			const struct rte_eth_fdir_input *fdir_input,
			unsigned char *raw_pkt)
{
	unsigned char *payload, *ptr;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	struct sctp_hdr *sctp;
	uint8_t size, dst = 0;
	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
	int len;

	/* fill the ethernet and IP head */
	len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
					 !!fdir_input->flow_ext.vlan_tci);
	if (len < 0)
		return -EINVAL;

	/* fill the L4 head */
	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		payload = raw_pkt + len;
		/*
		 * ARP packet is a special case on which the payload
		 * starts after the whole ARP header
		 */
		if (fdir_input->flow.l2_flow.ether_type ==
				rte_cpu_to_be_16(ETHER_TYPE_ARP))
			payload += sizeof(struct arp_hdr);
		set_idx = I40E_FLXPLD_L2_IDX;
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
		return -EINVAL;
	}

	/* fill the flexbytes to payload */
	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
		size = pf->fdir.flex_set[pit_idx].size;
		if (size == 0)
			continue;
		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
		ptr = payload +
		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
		(void)rte_memcpy(ptr,
				 &fdir_input->flow_ext.flexbytes[dst],
				 size * sizeof(uint16_t));
	}

	return 0;
}
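
/*
 * Worked example (illustrative): flex_set[] offsets are kept in 2-byte
 * words. A field with src_offset 6, dst_offset 3 and size 2 (all in
 * words) copies flexbytes[6..9] into payload bytes 12..15 of the
 * programming packet above.
 */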
static struct i40e_customized_pctype *
i40e_flow_fdir_find_customized_pctype(struct i40e_pf *pf, uint8_t pctype)
{
	struct i40e_customized_pctype *cus_pctype;
	enum i40e_new_pctype i = I40E_CUSTOMIZED_GTPC;

	for (; i < I40E_CUSTOMIZED_MAX; i++) {
		cus_pctype = &pf->customized_pctype[i];
		if (pctype == cus_pctype->pctype)
			return cus_pctype;
	}
	return NULL;
}
static inline int
i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
				const struct i40e_fdir_input *fdir_input,
				unsigned char *raw_pkt,
				bool vlan)
{
	struct i40e_customized_pctype *cus_pctype = NULL;
	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
	uint16_t *ether_type;
	uint8_t len = 2 * sizeof(struct ether_addr);
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	uint8_t pctype = fdir_input->pctype;
	bool is_customized_pctype = fdir_input->flow_ext.customized_pctype;
	static const uint8_t next_proto[] = {
		[I40E_FILTER_PCTYPE_FRAG_IPV4] = IPPROTO_IP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] = IPPROTO_TCP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] = IPPROTO_UDP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] = IPPROTO_SCTP,
		[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] = IPPROTO_IP,
		[I40E_FILTER_PCTYPE_FRAG_IPV6] = IPPROTO_NONE,
		[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] = IPPROTO_TCP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] = IPPROTO_UDP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] = IPPROTO_SCTP,
		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
	};

	raw_pkt += 2 * sizeof(struct ether_addr);
	if (vlan && fdir_input->flow_ext.vlan_tci) {
		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
		rte_memcpy(raw_pkt + sizeof(uint16_t),
			   &fdir_input->flow_ext.vlan_tci,
			   sizeof(uint16_t));
		raw_pkt += sizeof(vlan_frame);
		len += sizeof(vlan_frame);
	}
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	if (is_customized_pctype) {
		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
		if (!cus_pctype) {
			PMD_DRV_LOG(ERR, "unknown pctype %u.",
				    fdir_input->pctype);
			return -1;
		}
	}

	if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD)
		*ether_type = fdir_input->flow.l2_flow.ether_type;
	else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP ||
		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP ||
		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP ||
		 pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
		 pctype == I40E_FILTER_PCTYPE_FRAG_IPV4 ||
		 is_customized_pctype) {
		ip = (struct ipv4_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
		/* set len to default */
		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
			fdir_input->flow.ip4_flow.ttl :
			I40E_FDIR_IP_DEFAULT_TTL;
		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;

		if (!is_customized_pctype)
			ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
				fdir_input->flow.ip4_flow.proto :
				next_proto[fdir_input->pctype];
		else if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
			 cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
			 cus_pctype->index == I40E_CUSTOMIZED_GTPU)
			ip->next_proto_id = IPPROTO_UDP;
		len += sizeof(struct ipv4_hdr);
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP ||
		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP ||
		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP ||
		   pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
		ip6 = (struct ipv6_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->vtc_flow =
			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
					 (fdir_input->flow.ipv6_flow.tc <<
					  I40E_FDIR_IPv6_TC_OFFSET));
		ip6->payload_len =
			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
			fdir_input->flow.ipv6_flow.proto :
			next_proto[fdir_input->pctype];
		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
			fdir_input->flow.ipv6_flow.hop_limits :
			I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		rte_memcpy(&ip6->src_addr,
			   &fdir_input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr,
			   &fdir_input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
	} else {
		PMD_DRV_LOG(ERR, "unknown pctype %u.",
			    fdir_input->pctype);
		return -1;
	}

	return len;
}
/*
 * i40e_flow_fdir_construct_pkt - construct packet based on fields in input
 * @pf: board private structure
 * @fdir_input: input set of the flow director entry
 * @raw_pkt: a packet to be constructed
 */
static int
i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
			     const struct i40e_fdir_input *fdir_input,
			     unsigned char *raw_pkt)
{
	unsigned char *payload = NULL;
	unsigned char *ptr;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	struct sctp_hdr *sctp;
	struct rte_flow_item_gtp *gtp;
	struct ipv4_hdr *gtp_ipv4;
	struct ipv6_hdr *gtp_ipv6;
	uint8_t size, dst = 0;
	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
	int len;
	uint8_t pctype = fdir_input->pctype;
	struct i40e_customized_pctype *cus_pctype;

	/* raw packet template - just copy contents of the raw packet */
	if (fdir_input->flow_ext.pkt_template) {
		memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
		       fdir_input->flow.raw_flow.length);
		return 0;
	}

	/* fill the ethernet and IP head */
	len = i40e_flow_fdir_fill_eth_ip_head(pf, fdir_input, raw_pkt,
					      !!fdir_input->flow_ext.vlan_tci);
	if (len < 0)
		return -EINVAL;

	/* fill the L4 head */
	if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) {
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ||
		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV4) {
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) {
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
	} else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ||
		   pctype == I40E_FILTER_PCTYPE_FRAG_IPV6) {
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
	} else if (pctype == I40E_FILTER_PCTYPE_L2_PAYLOAD) {
		payload = raw_pkt + len;
		/*
		 * ARP packet is a special case on which the payload
		 * starts after the whole ARP header
		 */
		if (fdir_input->flow.l2_flow.ether_type ==
				rte_cpu_to_be_16(ETHER_TYPE_ARP))
			payload += sizeof(struct arp_hdr);
		set_idx = I40E_FLXPLD_L2_IDX;
	} else if (fdir_input->flow_ext.customized_pctype) {
		/* If customized pctype is used */
		cus_pctype = i40e_flow_fdir_find_customized_pctype(pf, pctype);
		if (cus_pctype->index == I40E_CUSTOMIZED_GTPC ||
		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4 ||
		    cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV6 ||
		    cus_pctype->index == I40E_CUSTOMIZED_GTPU) {
			udp = (struct udp_hdr *)(raw_pkt + len);
			udp->dgram_len =
				rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);

			gtp = (struct rte_flow_item_gtp *)
				((unsigned char *)udp + sizeof(struct udp_hdr));
			gtp->msg_len =
				rte_cpu_to_be_16(I40E_FDIR_GTP_DEFAULT_LEN);
			gtp->teid = fdir_input->flow.gtp_flow.teid;
			gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0X01;

			/* GTP-C message type is not supported. */
			if (cus_pctype->index == I40E_CUSTOMIZED_GTPC) {
				udp->dst_port =
				     rte_cpu_to_be_16(I40E_FDIR_GTPC_DST_PORT);
				gtp->v_pt_rsv_flags =
					I40E_FDIR_GTP_VER_FLAG_0X32;
			} else {
				udp->dst_port =
				     rte_cpu_to_be_16(I40E_FDIR_GTPU_DST_PORT);
				gtp->v_pt_rsv_flags =
					I40E_FDIR_GTP_VER_FLAG_0X30;
			}

			if (cus_pctype->index == I40E_CUSTOMIZED_GTPU_IPV4) {
				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
				gtp_ipv4 = (struct ipv4_hdr *)
					((unsigned char *)gtp +
					 sizeof(struct rte_flow_item_gtp));
				gtp_ipv4->version_ihl =
					I40E_FDIR_IP_DEFAULT_VERSION_IHL;
				gtp_ipv4->next_proto_id = IPPROTO_IP;
				gtp_ipv4->total_length =
					rte_cpu_to_be_16(
						I40E_FDIR_INNER_IP_DEFAULT_LEN);
				payload = (unsigned char *)gtp_ipv4 +
					sizeof(struct ipv4_hdr);
			} else if (cus_pctype->index ==
				   I40E_CUSTOMIZED_GTPU_IPV6) {
				gtp->msg_type = I40E_FDIR_GTP_MSG_TYPE_0XFF;
				gtp_ipv6 = (struct ipv6_hdr *)
					((unsigned char *)gtp +
					 sizeof(struct rte_flow_item_gtp));
				gtp_ipv6->vtc_flow =
					rte_cpu_to_be_32(
					       I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
					       (0 << I40E_FDIR_IPv6_TC_OFFSET));
				gtp_ipv6->proto = IPPROTO_NONE;
				gtp_ipv6->payload_len =
					rte_cpu_to_be_16(
					      I40E_FDIR_INNER_IPV6_DEFAULT_LEN);
				gtp_ipv6->hop_limits =
					I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
				payload = (unsigned char *)gtp_ipv6 +
					sizeof(struct ipv6_hdr);
			} else
				payload = (unsigned char *)gtp +
					sizeof(struct rte_flow_item_gtp);
		}
	} else {
		PMD_DRV_LOG(ERR, "unknown pctype %u.",
			    fdir_input->pctype);
		return -1;
	}

	/* fill the flexbytes to payload */
	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
		size = pf->fdir.flex_set[pit_idx].size;
		if (size == 0)
			continue;
		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
		ptr = payload +
		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
		(void)rte_memcpy(ptr,
				 &fdir_input->flow_ext.flexbytes[dst],
				 size * sizeof(uint16_t));
	}

	return 0;
}
/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
		uint32_t td_offset,
		unsigned int size,
		uint32_t td_tag)
{
	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
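
/*
 * Worked example (illustrative): the FDIR programming path below calls
 * i40e_build_ctob(I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS |
 * I40E_TX_DESC_CMD_DUMMY, 0, I40E_FDIR_PKT_LEN, 0), i.e. a DATA-type
 * descriptor qword whose command field carries the three flags and whose
 * buffer-size field carries 512; the offset and L2 tag fields stay zero.
 */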
/*
 * check the programming status descriptor in rx queue.
 * done after Programming Flow Director is programmed on
 * tx queue
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			    I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(ERR, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
	}

	return ret;
}
static int
i40e_fdir_filter_convert(const struct i40e_fdir_filter_conf *input,
			 struct i40e_fdir_filter *filter)
{
	rte_memcpy(&filter->fdir, input, sizeof(struct i40e_fdir_filter_conf));
	if (input->input.flow_ext.pkt_template) {
		filter->fdir.input.flow.raw_flow.packet = NULL;
		filter->fdir.input.flow.raw_flow.length =
			rte_hash_crc(input->input.flow.raw_flow.packet,
				     input->input.flow.raw_flow.length,
				     input->input.flow.raw_flow.pctype);
	}
	return 0;
}
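
/*
 * Illustrative sketch (not part of the upstream driver): for raw packet
 * templates the variable-length packet cannot be hashed field-by-field,
 * so the CRC32 of the template, seeded with the pctype, doubles as the
 * precomputed hash signature later passed to rte_hash_*_with_hash().
 * The "example_" helper below is hypothetical.
 */
static inline uint32_t
example_raw_flow_sig(const void *pkt, uint32_t len, uint8_t pctype)
{
	/* Same construction as i40e_fdir_filter_convert() above. */
	return rte_hash_crc(pkt, len, pctype);
}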
/* Check if there exists the flow director filter */
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
			const struct i40e_fdir_input *input)
{
	int ret;

	if (input->flow_ext.pkt_template)
		ret = rte_hash_lookup_with_hash(fdir_info->hash_table,
						(const void *)input,
						input->flow.raw_flow.length);
	else
		ret = rte_hash_lookup(fdir_info->hash_table,
				      (const void *)input);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}
/* Add a flow director filter into the SW list */
static int
i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
{
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	int ret;

	if (filter->fdir.input.flow_ext.pkt_template)
		ret = rte_hash_add_key_with_hash(fdir_info->hash_table,
				 &filter->fdir.input,
				 filter->fdir.input.flow.raw_flow.length);
	else
		ret = rte_hash_add_key(fdir_info->hash_table,
				       &filter->fdir.input);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir filter into hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = filter;

	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);

	return 0;
}
/* Delete a flow director filter from the SW list */
static int
i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct i40e_fdir_input *input)
{
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *filter;
	int ret;

	if (input->flow_ext.pkt_template)
		ret = rte_hash_del_key_with_hash(fdir_info->hash_table,
						 input,
						 input->flow.raw_flow.length);
	else
		ret = rte_hash_del_key(fdir_info->hash_table, input);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	filter = fdir_info->hash_map[ret];
	fdir_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
	rte_free(filter);

	return 0;
}
/*
 * i40e_add_del_fdir_filter - add or remove a flow director filter.
 * @pf: board private structure
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *filter,
			 bool add)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	enum i40e_filter_pctype pctype;
	int ret = 0;

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
			" check the mode in fdir_conf.");
		return -ENOTSUP;
	}

	pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
		PMD_DRV_LOG(ERR, "invalid flow_type input.");
		return -EINVAL;
	}
	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
	    filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}

	memset(pkt, 0, I40E_FDIR_PKT_LEN);

	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		return ret;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		/* get translated pctype value in fd pctype register */
		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
	}

	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		return ret;
	}

	return ret;
}
/*
 * i40e_flow_add_del_fdir_filter - add or remove a flow director filter.
 * @pf: board private structure
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
int
i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
			      const struct i40e_fdir_filter_conf *filter,
			      bool add)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	enum i40e_filter_pctype pctype;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter, *node;
	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
	int ret = 0;

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled, please check the mode in fdir_conf.");
		return -ENOTSUP;
	}

	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
	    filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.pkt_template) {
		if (filter->input.flow.raw_flow.length > I40E_FDIR_PKT_LEN ||
		    !filter->input.flow.raw_flow.packet) {
			PMD_DRV_LOG(ERR, "Invalid raw packet template"
				" flow filter parameters!");
			return -EINVAL;
		}
		pctype = filter->input.flow.raw_flow.pctype;
	} else {
		pctype = filter->input.pctype;
	}

	/* Check if there is the filter in SW list */
	memset(&check_filter, 0, sizeof(check_filter));
	i40e_fdir_filter_convert(filter, &check_filter);
	node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
	if (add && node) {
		PMD_DRV_LOG(ERR,
			    "Conflict with existing flow director rules!");
		return -EINVAL;
	}

	if (!add && !node) {
		PMD_DRV_LOG(ERR,
			    "There's no corresponding flow director filter!");
		return -EINVAL;
	}

	memset(pkt, 0, I40E_FDIR_PKT_LEN);

	ret = i40e_flow_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		return ret;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		/* get translated pctype value in fd pctype register */
		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
	}

	ret = i40e_flow_fdir_filter_programming(pf, pctype, filter, add);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		return ret;
	}

	if (add) {
		fdir_filter = rte_zmalloc("fdir_filter",
					  sizeof(*fdir_filter), 0);
		if (fdir_filter == NULL) {
			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
			return -ENOMEM;
		}
		rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
		ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
	} else {
		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
	}

	return ret;
}
/*
 * i40e_fdir_filter_programming - Program a flow director filter rule.
 * This is done by a Flow Director Programming Descriptor followed by a
 * packet structure that contains the filter fields to match.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id;
	uint8_t dest;
	uint32_t i;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status */
	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if (i40e_check_fdir_programming_status(rxq) >= 0)
			return 0;
		rte_delay_us(1);
	}
	PMD_DRV_LOG(ERR,
		"Failed to program FDIR filter: programming status reported.");
	return -ETIMEDOUT;
}
/*
 * i40e_flow_fdir_filter_programming - Program a flow director filter rule.
 * This is done by a Flow Director Programming Descriptor followed by a
 * packet structure that contains the filter fields to match.
 * @pf: board private structure
 * @pctype: pctype
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
static int
i40e_flow_fdir_filter_programming(struct i40e_pf *pf,
				  enum i40e_filter_pctype pctype,
				  const struct i40e_fdir_filter_conf *filter,
				  bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct i40e_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id;
	uint8_t dest;
	uint32_t i;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	fdirdp = (volatile struct i40e_filter_program_desc *)
				(&txq->tx_ring[txq->tx_tail]);

	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == I40E_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == I40E_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == I40E_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter: unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	txdp = &txq->tx_ring[txq->tx_tail + 1];
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS  |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR,
			"Failed to program FDIR filter: time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status */
	rte_delay_us(I40E_FDIR_MAX_WAIT_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR,
			"Failed to program FDIR filter: programming status reported.");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * i40e_fdir_flush - clear all filters of Flow Director table
 * @pf: board private structure
 */
int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg;
	uint16_t guarant_cnt, best_cnt;
	uint16_t i;

	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (i >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	}
	PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
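
/*
 * Example (illustrative sketch, not part of the driver): an application
 * would normally reach i40e_fdir_flush() through the legacy filter API.
 * "port_id" is a hypothetical, already-configured i40e port; a NULL arg
 * is accepted for the flush operation (see i40e_fdir_ctrl_func() below).
 *
 *	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *					  RTE_ETH_FILTER_FLUSH, NULL);
 *	if (ret < 0)
 *		printf("FDIR flush failed: %d\n", ret);
 */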
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			struct rte_eth_flex_payload_cfg *flex_set,
			uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			if (flex_pit->size == 0)
				continue;
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		(*num)++;
		ptr++;
	}
}
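
/*
 * Offsets and sizes in the flex_set table are stored in 16-bit words;
 * the helper above reports them to the application in bytes. For example
 * (hypothetical values), a flex_pit entry with src_offset = 3,
 * dst_offset = 0 and size = 2 words maps payload bytes 6..9 to bytes
 * 0..3 of the flexible payload, i.e. src_offset[0..3] = {6, 7, 8, 9}.
 */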
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			struct rte_eth_fdir_flex_mask *flex_mask,
			uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		flow_type = i40e_pctype_to_flowtype(pf->adapter,
						    (enum i40e_filter_pctype)i);
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;

		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}
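
/*
 * The reported mask is built in two steps: word_mask first enables whole
 * 16-bit flex words (both bytes set to 0xFF), then the per-word bitmasks
 * clear the bits that hardware does not compare. For example
 * (hypothetical values), bitmask[j].offset = 1 and bitmask[j].mask =
 * 0x00FF give mask_tmp = 0xFF00, so byte 2 of the reported mask stays
 * 0xFF while byte 3 is cleared to 0x00.
 */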
/*
 * i40e_fdir_info_get - get information of Flow Director
 * @dev: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled with
 *    the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t num_flex_set = 0;
	uint16_t num_flex_mask = 0;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
		fdir->mode = RTE_FDIR_MODE_PERFECT;
	else
		fdir->mode = RTE_FDIR_MODE_NONE;

	fdir->guarant_spc =
		(uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc =
		(uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;

	i40e_fdir_info_get_flex_set(pf,
				fdir->flex_conf.flex_set,
				&num_flex_set);
	i40e_fdir_info_get_flex_mask(pf,
				fdir->flex_conf.flex_mask,
				&num_flex_mask);

	fdir->flex_conf.nb_payloads = num_flex_set;
	fdir->flex_conf.nb_flexmasks = num_flex_mask;
}
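
/*
 * Example (illustrative sketch, not part of the driver): querying the
 * information filled in above through the legacy filter API. "port_id"
 * is a hypothetical i40e port.
 *
 *	struct rte_eth_fdir_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_INFO, &info);
 *	printf("guaranteed space: %u, best effort space: %u\n",
 *	       info.guarant_spc, info.best_spc);
 */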
/*
 * i40e_fdir_stats_get - get statistics of Flow Director
 * @dev: ethernet device to get info from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled with
 *    the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
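
/*
 * Example (illustrative sketch, not part of the driver): reading the
 * counters filled in above through the legacy filter API. "port_id" is
 * a hypothetical i40e port.
 *
 *	struct rte_eth_fdir_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_STATS, &stats);
 *	printf("guarant_cnt: %u, best_cnt: %u\n",
 *	       stats.guarant_cnt, stats.best_cnt);
 */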
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
		ret = i40e_fdir_filter_inset_select(pf,
				&(info->info.input_set_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return ret;
}
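
/*
 * Example (illustrative sketch, not part of the driver): selecting the
 * input set for IPv4/UDP flows so that only the source address is
 * matched. Field values are hypothetical; "port_id" is a hypothetical
 * i40e port.
 *
 *	struct rte_eth_fdir_filter_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.info_type = RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT;
 *	info.info.input_set_conf.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	info.info.input_set_conf.inset_size = 1;
 *	info.info.input_set_conf.field[0] = RTE_ETH_INPUT_SET_L3_SRC_IP4;
 *	info.info.input_set_conf.op = RTE_ETH_INPUT_SET_SELECT;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_SET, &info);
 */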
/*
 * i40e_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: pointer to the ethernet device
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if ((pf->flags & I40E_FLAG_FDIR) == 0)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
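
/*
 * Example (illustrative sketch, not part of the driver): adding a
 * perfect-mode filter that steers IPv4/UDP packets with a given source
 * address and destination port to RX queue 3. All values are
 * hypothetical; addresses and ports are in network byte order, and
 * 0xC0A80001 is 192.168.0.1.
 *
 *	struct rte_eth_fdir_filter flt;
 *
 *	memset(&flt, 0, sizeof(flt));
 *	flt.soft_id = 1;
 *	flt.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	flt.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(0xC0A80001);
 *	flt.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(1024);
 *	flt.action.rx_queue = 3;
 *	flt.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *	flt.action.report_status = RTE_ETH_FDIR_REPORT_ID;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &flt);
 */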
/* Restore flow director filter */
void
i40e_fdir_filter_restore(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
	struct i40e_fdir_filter *f;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;
	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */

	TAILQ_FOREACH(f, fdir_list, rules)
		i40e_flow_add_del_fdir_filter(dev, &f->fdir, TRUE);

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);

	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
		    guarant_cnt, best_cnt);
}