/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN              16
#endif

#define I40E_FDIR_PKT_LEN                   512
#define I40E_FDIR_IP_DEFAULT_LEN            420
#define I40E_FDIR_IP_DEFAULT_TTL            0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60000000
#define I40E_FDIR_IPv6_TC_OFFSET            20

#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN          380
#define I40E_FDIR_UDP_DEFAULT_LEN           400
/* Wait time for fdir filter programming */
#define I40E_FDIR_MAX_WAIT_US 10000

/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY       50
#define I40E_FDIR_FLUSH_INTERVAL_MS 5

#define I40E_COUNTER_PF           2
/* Statistic counter index for one pf */
#define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
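/*
 * Worked example (illustrative only): each PF owns I40E_COUNTER_PF
 * statistic counters, so PF 0 uses counter index 0, PF 1 uses index 2
 * and PF 3 uses index 6.
 */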
#define I40E_FDIR_FLOWS ( \
	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add);
static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
			struct i40e_fdir_filter *filter);
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
			const struct rte_eth_fdir_input *input);
static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
			struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* FDIR TX queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
			eth_dev->device->driver->name,
			I40E_FDIR_MZ_NAME,
			eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
				"flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
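/*
 * Typical sequence (descriptive sketch, not a contract): the PMD calls
 * i40e_fdir_setup() once to create the FDIR VSI, its TX/RX queue pair and
 * the programming-packet memzone, i40e_fdir_configure() to program the
 * flex payload/mask defaults, then i40e_add_del_fdir_filter() per rule;
 * i40e_fdir_teardown() releases everything again.
 */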
/*
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
void
i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;
	int err = i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
	if (err)
		PMD_DRV_LOG(DEBUG, "Failed to do FDIR TX switch off");
	err = i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
	if (err)
		PMD_DRV_LOG(DEBUG, "Failed to do FDIR RX switch off");
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
}
/* check whether the flow director table is empty */
static inline int
i40e_fdir_empty(struct i40e_hw *hw)
{
	uint32_t guarant_cnt, best_cnt;

	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				  I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				  I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			       I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (best_cnt + guarant_cnt > 0)
		return -1;

	return 0;
}
/*
 * Initialize the configuration about bytes stream extracted as flexible
 * payload and mask setting
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;
	uint16_t flow_type;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		flow_type = i40e_pctype_to_flowtype(pf->adapter, pctype);

		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should not be less" \
			" than the previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
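/*
 * Worked example (illustrative only): if flex_pit1 covers src_offset 0
 * with size 4, any following field must start at src_offset >= 4;
 * a flex_pit2.src_offset of 2 would overlap and is rejected with -EINVAL.
 */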
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
 * and the flex_pit will be sorted by its src_offset value
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
		       struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t src_tmp, size, num = 0;
	uint16_t i, k, j = 0;

	while (j < I40E_FDIR_MAX_FLEX_LEN) {
		size = 1;
		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
			if (src_offset[j + 1] == src_offset[j] + 1)
				size++;
			else
				break;
		}
		src_tmp = src_offset[j] + 1 - size;
		/* the flex_pit needs to be sorted by src_offset */
		for (i = 0; i < num; i++) {
			if (src_tmp < flex_pit[i].src_offset)
				break;
		}
		/* if insert required, move backward */
		for (k = num; k > i; k--)
			flex_pit[k] = flex_pit[k - 1];
		/* insert it */
		flex_pit[i].dst_offset = j + 1 - size;
		flex_pit[i].src_offset = src_tmp;
		flex_pit[i].size = size;
		j++;
		num++;
	}
	return num;
}
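/*
 * Worked example (illustrative only): with
 * src_offset[] = {0, 1, 2, 3, 4, 5, 6, 7, 20, 21, 22, 23, 24, 25, 26, 27}
 * the two contiguous runs collapse into two sorted entries:
 *   flex_pit[0] = {src_offset = 0,  size = 8, dst_offset = 0}
 *   flex_pit[1] = {src_offset = 20, size = 8, dst_offset = 8}
 * and the function returns num = 2.
 */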
/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t num, i;

	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
			return -EINVAL;
		}
	}

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
	if (num > I40E_MAX_FLXPLD_FIED) {
		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
		return -EINVAL;
	}
	for (i = 0; i < num; i++) {
		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
			flex_pit[i].src_offset & 0x01) {
			PMD_DRV_LOG(ERR, "flexpayload should be measured"
				" in word");
			return -EINVAL;
		}
		if (i != num - 1)
			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
	}
	return 0;
}
/*
 * i40e_check_fdir_flex_conf - check whether the flex payload and mask
 * configuration arguments are valid
 */
static inline int
i40e_check_fdir_flex_conf(const struct i40e_adapter *adapter,
			  const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;
	enum i40e_filter_pctype pctype;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		pctype = i40e_flowtype_to_pctype(adapter, flex_mask->flow_type);
		if (pctype == I40E_FILTER_PCTYPE_INVALID) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX)
				nb_bitmask++;
			if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
				PMD_DRV_LOG(ERR, " exceed maximal"
					" number of bitmasks.");
				return -EINVAL;
			}
		}
	}
	return 0;
}
/*
 * i40e_set_flx_pld_cfg - configure how the byte stream is extracted as
 * flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
		     const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);

	for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
			    NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
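/*
 * Illustrative configuration (a sketch under the rte_eth_ctrl.h flow
 * director API, not part of this file): select the first 16 bytes of the
 * L4 payload as flexible payload.
 *
 *	struct rte_eth_flex_payload_cfg cfg = { .type = RTE_ETH_L4_PAYLOAD };
 *	uint16_t n;
 *
 *	for (n = 0; n < RTE_ETH_FDIR_MAX_FLEXLEN; n++)
 *		cfg.src_offset[n] = n;
 *	i40e_set_flx_pld_cfg(pf, &cfg);
 */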
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		    I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			   I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			    I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
/*
 * Configure flow director related setting
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * configuration needs to be done before
	 * flow director filters are added.
	 * If filters exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(pf->adapter, conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		if (hw->mac.type == I40E_MAC_X722) {
			/* get translated pctype value in fd pctype register */
			pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
				hw, I40E_GLQF_FD_PCTYPES(
				(int)i40e_flowtype_to_pctype(pf->adapter,
				conf->flex_mask[i].flow_type)));
		} else
			pctype = i40e_flowtype_to_pctype(pf->adapter,
						conf->flex_mask[i].flow_type);

		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
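/*
 * Illustrative application-side configuration (a sketch; port_id, nb_rxq
 * and nb_txq are assumed variables): flow director is enabled by filling
 * fdir_conf in rte_eth_conf before rte_eth_dev_configure(), e.g.
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *			.drop_queue = 127,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */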
static inline int
i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
			   unsigned char *raw_pkt,
			   bool vlan)
{
	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
	uint16_t *ether_type;
	uint8_t len = 2 * sizeof(struct ether_addr);
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
		[RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
	};

	raw_pkt += 2 * sizeof(struct ether_addr);
	if (vlan && fdir_input->flow_ext.vlan_tci) {
		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
		rte_memcpy(raw_pkt + sizeof(uint16_t),
			   &fdir_input->flow_ext.vlan_tci,
			   sizeof(uint16_t));
		raw_pkt += sizeof(vlan_frame);
		len += sizeof(vlan_frame);
	}
	ether_type = (uint16_t *)raw_pkt;
	raw_pkt += sizeof(uint16_t);
	len += sizeof(uint16_t);

	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_L2_PAYLOAD:
		*ether_type = fdir_input->flow.l2_flow.ether_type;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		ip = (struct ipv4_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
		/* set the total length to the default value */
		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
		ip->next_proto_id = fdir_input->flow.ip4_flow.proto ?
					fdir_input->flow.ip4_flow.proto :
					next_proto[fdir_input->flow_type];
		ip->time_to_live = fdir_input->flow.ip4_flow.ttl ?
					fdir_input->flow.ip4_flow.ttl :
					I40E_FDIR_IP_DEFAULT_TTL;
		ip->type_of_service = fdir_input->flow.ip4_flow.tos;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
		len += sizeof(struct ipv4_hdr);
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		ip6 = (struct ipv6_hdr *)raw_pkt;

		*ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->vtc_flow =
			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
					 (fdir_input->flow.ipv6_flow.tc <<
					  I40E_FDIR_IPv6_TC_OFFSET));
		ip6->payload_len =
			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		ip6->proto = fdir_input->flow.ipv6_flow.proto ?
				fdir_input->flow.ipv6_flow.proto :
				next_proto[fdir_input->flow_type];
		ip6->hop_limits = fdir_input->flow.ipv6_flow.hop_limits ?
				fdir_input->flow.ipv6_flow.hop_limits :
				I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		rte_memcpy(&ip6->src_addr,
			   &fdir_input->flow.ipv6_flow.dst_ip,
			   IPV6_ADDR_LEN);
		rte_memcpy(&ip6->dst_addr,
			   &fdir_input->flow.ipv6_flow.src_ip,
			   IPV6_ADDR_LEN);
		len += sizeof(struct ipv6_hdr);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.",
			    fdir_input->flow_type);
		return -1;
	}

	return len;
}
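/*
 * Layout of the programming packet built above (illustrative):
 *
 *   | dst MAC (6) | src MAC (6) | [VLAN tag (4)] | ethertype (2) |
 *   | IPv4/IPv6 header | L4 header | flexible payload ... |
 *
 * Only the fields that take part in the input set carry meaningful
 * values; everything else keeps the defaults filled in above.
 */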
/*
 * i40e_fdir_construct_pkt - construct packet based on fields in input
 * @pf: board private structure
 * @fdir_input: input set of the flow director entry
 * @raw_pkt: a packet to be constructed
 */
static int
i40e_fdir_construct_pkt(struct i40e_pf *pf,
			const struct rte_eth_fdir_input *fdir_input,
			unsigned char *raw_pkt)
{
	unsigned char *payload, *ptr;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	struct sctp_hdr *sctp;
	uint8_t size, dst = 0;
	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/
	int len;

	/* fill the ethernet and IP head */
	len = i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt,
					 !!fdir_input->flow_ext.vlan_tci);
	if (len < 0)
		return -EINVAL;

	/* fill the L4 head */
	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		udp = (struct udp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + len);
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		payload = raw_pkt + len;
		set_idx = I40E_FLXPLD_L3_IDX;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		payload = raw_pkt + len;
		/*
		 * ARP packet is a special case on which the payload
		 * starts after the whole ARP header
		 */
		if (fdir_input->flow.l2_flow.ether_type ==
				rte_cpu_to_be_16(ETHER_TYPE_ARP))
			payload += sizeof(struct arp_hdr);
		set_idx = I40E_FLXPLD_L2_IDX;
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
		return -EINVAL;
	}

	/* fill the flexbytes to payload */
	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
		size = pf->fdir.flex_set[pit_idx].size;
		if (size == 0)
			continue;
		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
		ptr = payload +
		      pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
		rte_memcpy(ptr,
			   &fdir_input->flow_ext.flexbytes[dst],
			   size * sizeof(uint16_t));
	}

	return 0;
}
/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
		uint32_t td_offset,
		unsigned int size,
		uint32_t td_tag)
{
	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
/*
 * check the programming status descriptor in rx queue.
 * done after the Flow Director programming descriptor is sent on the
 * tx queue
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(ERR, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
	}

	return ret;
}
static int
i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
			 struct i40e_fdir_filter *filter)
{
	rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
	return 0;
}
/* Check if there exists the flow director filter */
static struct i40e_fdir_filter *
i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
			   const struct rte_eth_fdir_input *input)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}
/* Add a flow director filter into the SW list */
static int
i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
{
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table,
			       &filter->fdir.input);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir filter to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = filter;

	TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);

	return 0;
}
/* Delete a flow director filter from the SW list */
static int
i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
{
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *filter;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, input);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	filter = fdir_info->hash_map[ret];
	fdir_info->hash_map[ret] = NULL;

	TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
	rte_free(filter);

	return 0;
}
/*
 * i40e_add_del_fdir_filter - add or remove a flow director filter.
 * @pf: board private structure
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *filter,
			 bool add)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	enum i40e_filter_pctype pctype;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter, *node;
	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
	int ret = 0;

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
			" check the mode in fdir_conf.");
		return -ENOTSUP;
	}

	pctype = i40e_flowtype_to_pctype(pf->adapter, filter->input.flow_type);
	if (pctype == I40E_FILTER_PCTYPE_INVALID) {
		PMD_DRV_LOG(ERR, "invalid flow_type input.");
		return -EINVAL;
	}
	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
	    filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}

	/* Check if there is the filter in SW list */
	memset(&check_filter, 0, sizeof(check_filter));
	i40e_fdir_filter_convert(filter, &check_filter);
	node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
	if (add && node) {
		PMD_DRV_LOG(ERR,
			    "Conflict with existing flow director rules!");
		return -EINVAL;
	}

	if (!add && !node) {
		PMD_DRV_LOG(ERR,
			    "There's no corresponding flow director filter!");
		return -EINVAL;
	}

	memset(pkt, 0, I40E_FDIR_PKT_LEN);

	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		return ret;
	}

	if (hw->mac.type == I40E_MAC_X722) {
		/* get translated pctype value in fd pctype register */
		pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
			hw, I40E_GLQF_FD_PCTYPES((int)pctype));
	}

	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		return ret;
	}

	if (add) {
		fdir_filter = rte_zmalloc("fdir_filter",
					  sizeof(*fdir_filter), 0);
		if (fdir_filter == NULL)
			return -ENOMEM;
		rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
		ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
	} else {
		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
	}

	return ret;
}
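/*
 * Illustrative application-side usage (a sketch under the legacy
 * rte_eth_ctrl.h filter API; port and queue values are made up): steer
 * UDP/IPv4 10.0.0.1:80 -> 10.0.0.2:88 to RX queue 3.
 *
 *	struct rte_eth_fdir_filter f = {
 *		.soft_id = 1,
 *		.input = {
 *			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *			.flow.udp4_flow = {
 *				.ip.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1)),
 *				.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2)),
 *				.src_port = rte_cpu_to_be_16(80),
 *				.dst_port = rte_cpu_to_be_16(88),
 *			},
 *		},
 *		.action = {
 *			.rx_queue = 3,
 *			.behavior = RTE_ETH_FDIR_ACCEPT,
 *			.report_status = RTE_ETH_FDIR_REPORT_ID,
 *		},
 *	};
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &f);
 */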
/*
 * i40e_fdir_filter_programming - Program a flow director filter rule.
 * This is done with a Flow Director programming descriptor, followed by
 * the packet structure that contains the filter fields to match.
 * @pf: board private structure
 * @pctype: packet classify type
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_filter *filter,
			     bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32(((uint32_t)vsi_id <<
					  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
					  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(
			((uint32_t)pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
			i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
	for (i = 0; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
		rte_delay_us(1);
	}
	if (i >= I40E_FDIR_MAX_WAIT_US) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status */
	for (; i < I40E_FDIR_MAX_WAIT_US; i++) {
		if (i40e_check_fdir_programming_status(rxq) >= 0)
			return 0;
		rte_delay_us(1);
	}
	PMD_DRV_LOG(ERR,
		"Failed to program FDIR filter: programming status reported.");
	return -ETIMEDOUT;
}
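/*
 * Note on the sequence above (descriptive, not normative): each
 * programming cycle consumes two descriptors on the FDIR TX queue -- a
 * filter programming descriptor that carries the rule, followed by a
 * data descriptor marked DUMMY that points at the pre-built raw packet
 * from which the hardware extracts the input set. Completion is polled
 * first via the DD bit on the data descriptor, then via the programming
 * status writeback on the paired RX queue.
 */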
/*
 * i40e_fdir_flush - clear all filters of Flow Director table
 * @pf: board private structure
 */
int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg;
	uint16_t guarant_cnt, best_cnt;
	uint16_t i;

	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (i >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				  I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				  I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			       I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	} else
		PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
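/*
 * Illustrative application-side flush (a sketch): all flow director
 * entries can be dropped at once through the legacy filter API.
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_FLUSH, NULL);
 */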
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			    struct rte_eth_flex_payload_cfg *flex_set,
			    uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			if (flex_pit->size == 0)
				continue;
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		(*num)++;
		ptr++;
	}
}
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			     struct rte_eth_fdir_flex_mask *flex_mask,
			     uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		flow_type = i40e_pctype_to_flowtype(pf->adapter,
						    (enum i40e_filter_pctype)i);
		if (flow_type == RTE_ETH_FLOW_UNKNOWN)
			continue;

		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}
/*
 * i40e_fdir_info_get - get information of Flow Director
 * @dev: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled
 *      with the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t num_flex_set = 0;
	uint16_t num_flex_mask = 0;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
		fdir->mode = RTE_FDIR_MODE_PERFECT;
	else
		fdir->mode = RTE_FDIR_MODE_NONE;

	fdir->guarant_spc =
		(uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc =
		(uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;

	i40e_fdir_info_get_flex_set(pf,
				fdir->flex_conf.flex_set,
				&num_flex_set);
	i40e_fdir_info_get_flex_mask(pf,
				fdir->flex_conf.flex_mask,
				&num_flex_mask);

	fdir->flex_conf.nb_payloads = num_flex_set;
	fdir->flex_conf.nb_flexmasks = num_flex_mask;
}
/*
 * i40e_fdir_stats_get - get statistics of Flow Director
 * @dev: ethernet device to get statistics from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled
 *      with the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			    I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
		ret = i40e_fdir_filter_inset_select(pf,
				&(info->info.input_set_conf));
		break;
	default:
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return ret;
}
/*
 * i40e_fdir_ctrl_func - deal with all operations on flow director.
 * @dev: ethernet device
 * @filter_op: operation to be taken.
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if ((pf->flags & I40E_FLAG_FDIR) == 0)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Restore flow director filter */
void
i40e_fdir_filter_restore(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
	struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
	struct i40e_fdir_filter *f;
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;
	uint32_t guarant_cnt;  /**< Number of filters in guaranteed spaces. */
	uint32_t best_cnt;     /**< Number of filters in best effort spaces. */

	TAILQ_FOREACH(f, fdir_list, rules)
		i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);

	PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d,  Best count: %d",
		    guarant_cnt, best_cnt);
}