/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_arp.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#define I40E_FDIR_MZ_NAME          "FDIR_MEMZONE"
#ifndef IPV6_ADDR_LEN
#define IPV6_ADDR_LEN              16
#endif

#define I40E_FDIR_PKT_LEN                   512
#define I40E_FDIR_IP_DEFAULT_LEN            420
#define I40E_FDIR_IP_DEFAULT_TTL            0x40
#define I40E_FDIR_IP_DEFAULT_VERSION_IHL    0x45
#define I40E_FDIR_TCP_DEFAULT_DATAOFF       0x50
#define I40E_FDIR_IPv6_DEFAULT_VTC_FLOW     0x60300000
#define I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS   0xFF
#define I40E_FDIR_IPv6_PAYLOAD_LEN          380
#define I40E_FDIR_UDP_DEFAULT_LEN           400
/* Wait count and interval for fdir filter programming */
#define I40E_FDIR_WAIT_COUNT       10
#define I40E_FDIR_WAIT_INTERVAL_US 1000
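/*
 * With the values above, filter programming is polled for at most
 * I40E_FDIR_WAIT_COUNT * I40E_FDIR_WAIT_INTERVAL_US = 10 ms in total;
 * see i40e_fdir_filter_programming() below.
 */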
/* Wait count and interval for fdir filter flush */
#define I40E_FDIR_FLUSH_RETRY       50
#define I40E_FDIR_FLUSH_INTERVAL_MS 5

#define I40E_COUNTER_PF           2
/* Statistic counter index for one PF */
#define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
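/*
 * Each PF owns I40E_COUNTER_PF consecutive statistic counters, so e.g.
 * PF 0 uses counter index 0 and PF 1 uses counter index 2.
 */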
#define I40E_MAX_FLX_SOURCE_OFF           480
#define I40E_FLX_OFFSET_IN_FIELD_VECTOR   50

#define NONUSE_FLX_PIT_DEST_OFF 63
#define NONUSE_FLX_PIT_FSIZE    1
#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
	(((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
		I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
	(((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
		I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
	((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \
		I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
		I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
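/*
 * MK_FLX_PIT() packs a source offset, a field size and a destination
 * offset (all in words) into one PRTQF_FLX_PIT register value; the
 * destination is biased by I40E_FLX_OFFSET_IN_FIELD_VECTOR. For example,
 * MK_FLX_PIT(0, 8, 0) extracts 8 words from the start of the payload,
 * which is exactly the default value 0x0000C900 programmed in
 * i40e_init_flx_pld() below.
 */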
#define I40E_FDIR_FLOWS ( \
	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) | \
	(1 << RTE_ETH_FLOW_L2_PAYLOAD))
#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
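/*
 * I40E_FLEX_WORD_MASK(off) selects the bit for flexible payload word
 * 'off' in an 8-bit word mask; word 0 maps to the most significant bit,
 * e.g. I40E_FLEX_WORD_MASK(0) == 0x80 and I40E_FLEX_WORD_MASK(7) == 0x01.
 */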
static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
static int i40e_check_fdir_flex_conf(
	const struct rte_eth_fdir_flex_conf *conf);
static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
			 const struct rte_eth_flex_payload_cfg *cfg);
static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
		enum i40e_filter_pctype pctype,
		const struct rte_eth_fdir_flex_mask *mask_cfg);
static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
			const struct rte_eth_fdir_input *fdir_input,
			unsigned char *raw_pkt);
static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_eth_fdir_filter *filter,
			bool add);
static int i40e_fdir_filter_programming(struct i40e_pf *pf,
			enum i40e_filter_pctype pctype,
			const struct rte_eth_fdir_filter *filter,
			bool add);
static int i40e_fdir_flush(struct rte_eth_dev *dev);
static void i40e_fdir_info_get(struct rte_eth_dev *dev,
			struct rte_eth_fdir_info *fdir);
static void i40e_fdir_stats_get(struct rte_eth_dev *dev,
			struct rte_eth_fdir_stats *stat);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
{
	struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
	struct i40e_hmc_obj_rxq rx_ctx;
	int err = I40E_SUCCESS;

	memset(&rx_ctx, 0, sizeof(struct i40e_hmc_obj_rxq));
	/* Init the RX queue in hardware */
	rx_ctx.dbuff = I40E_RXBUF_SZ_1024 >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = 0;
	rx_ctx.base = rxq->rx_ring_phys_addr / I40E_QUEUE_BASE_ADDR_UNIT;
	rx_ctx.qlen = rxq->nb_rx_desc;
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
	rx_ctx.dsize = 1;
#endif
	rx_ctx.dtype = i40e_header_split_none;
	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
	rx_ctx.rxmax = ETHER_MAX_LEN;
	rx_ctx.tphrdesc_ena = 1;
	rx_ctx.tphwdesc_ena = 1;
	rx_ctx.tphdata_ena = 1;
	rx_ctx.tphhead_ena = 1;
	rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 0;
	rx_ctx.l2tsel = 1;
	rx_ctx.showiv = 0;
	rx_ctx.prefena = 1;

	err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to clear FDIR RX queue context.");
		return err;
	}
	err = i40e_set_lan_rx_queue_context(hw, rxq->reg_idx, &rx_ctx);
	if (err != I40E_SUCCESS) {
		PMD_DRV_LOG(ERR, "Failed to set FDIR RX queue context.");
		return err;
	}
	rxq->qrx_tail = hw->hw_addr +
		I40E_QRX_TAIL(rxq->vsi->base_queue);

	rte_wmb();
	/* Init the RX tail register. */
	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

	return err;
}
/*
 * i40e_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
int
i40e_fdir_setup(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	int err = I40E_SUCCESS;
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz = NULL;
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;

	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return I40E_NOT_SUPPORTED;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
			" num_filters_best_effort = %u.",
			hw->func_caps.fd_filters_guaranteed,
			hw->func_caps.fd_filters_best_effort);

	vsi = pf->fdir.fdir_vsi;
	if (vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return I40E_SUCCESS;
	}
	/* make new FDIR VSI */
	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return I40E_ERR_NO_AVAILABLE_VSI;
	}
	pf->fdir.fdir_vsi = vsi;

	/* Fdir tx queue setup */
	err = i40e_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* Fdir rx queue setup */
	err = i40e_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = i40e_tx_queue_init(pf->fdir.txq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR TX initialization.");
		goto fail_mem;
	}

	/* need switch on before dev start */
	err = i40e_switch_tx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do fdir TX switch on.");
		goto fail_mem;
	}

	/* Init the rx queue in hardware */
	err = i40e_fdir_rx_queue_init(pf->fdir.rxq);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX initialization.");
		goto fail_mem;
	}

	/* switch on rx queue */
	err = i40e_switch_rx_queue(hw, vsi->base_queue, TRUE);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to do FDIR RX switch on.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "%s_%s_%d",
		 eth_dev->driver->pci_drv.name,
		 I40E_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = I40E_ERR_NO_MEMORY;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);

	pf->fdir.match_counter_index = I40E_COUNTER_INDEX_FDIR(hw->pf_id);
	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return I40E_SUCCESS;

fail_mem:
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
/*
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
void
i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;
	i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
	i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
	i40e_dev_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	i40e_dev_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	i40e_vsi_release(vsi);
	pf->fdir.fdir_vsi = NULL;
}
/* check whether the flow director table is empty */
static inline int
i40e_fdir_empty(struct i40e_hw *hw)
{
	uint32_t guarant_cnt, best_cnt;

	guarant_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint32_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (best_cnt + guarant_cnt > 0)
		return -1;

	return 0;
}
/*
 * Initialize the configuration about bytes stream extracted as flexible
 * payload and mask setting
 */
static inline void
i40e_init_flx_pld(struct i40e_pf *pf)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint8_t pctype;
	int i, index;

	/*
	 * Define the bytes stream extracted as flexible payload in
	 * field vector. By default, select 8 words from the beginning
	 * of payload as flexible payload.
	 */
	for (i = I40E_FLXPLD_L2_IDX; i < I40E_MAX_FLXPLD_LAYER; i++) {
		index = i * I40E_MAX_FLXPLD_FIED;
		pf->fdir.flex_set[index].src_offset = 0;
		pf->fdir.flex_set[index].size = I40E_FDIR_MAX_FLEXWORD_NUM;
		pf->fdir.flex_set[index].dst_offset = 0;
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(index), 0x0000C900);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
	}

	/* initialize the masks */
	for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++) {
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)pctype))
			continue;
		pf->fdir.flex_mask[pctype].word_mask = 0;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), 0);
		for (i = 0; i < I40E_FDIR_BITMASK_NUM_WORD; i++) {
			/* mask for flex payload */
			pf->fdir.flex_mask[pctype].bitmask[i].offset = 0;
			pf->fdir.flex_mask[pctype].bitmask[i].mask = 0;
			i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), 0);
		}
	}
}
#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
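/* I40E_WORD() joins two bytes, e.g. I40E_WORD(0x12, 0x34) == 0x1234. */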
#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
	if ((flex_pit2).src_offset < \
		(flex_pit1).src_offset + (flex_pit1).size) { \
		PMD_DRV_LOG(ERR, "src_offset should not be less" \
			" than the previous offset" \
			" + previous FSIZE."); \
		return -EINVAL; \
	} \
} while (0)
/*
 * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
 * and the flex_pit will be sorted by its src_offset value
 */
static inline uint16_t
i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
		       struct i40e_fdir_flex_pit *flex_pit)
{
	uint16_t src_tmp, size, num = 0;
	uint16_t i, k, j = 0;

	while (j < I40E_FDIR_MAX_FLEX_LEN) {
		size = 1;
		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
			if (src_offset[j + 1] == src_offset[j] + 1)
				size++;
			else
				break;
		}
		src_tmp = src_offset[j] + 1 - size;
		/* the flex_pit needs to be sorted by src_offset */
		for (i = 0; i < num; i++) {
			if (src_tmp < flex_pit[i].src_offset)
				break;
		}
		/* if insert required, move backward */
		for (k = num; k > i; k--)
			flex_pit[k] = flex_pit[k - 1];
		/* insert it */
		flex_pit[i].dst_offset = j + 1 - size;
		flex_pit[i].src_offset = src_tmp;
		flex_pit[i].size = size;
		j++;
		num++;
	}

	return num;
}
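/*
 * For example, if all I40E_FDIR_MAX_FLEX_LEN source offsets are
 * consecutive (0, 1, 2, ...), the loop above collapses them into a
 * single flex_pit entry {src_offset = 0, size = I40E_FDIR_MAX_FLEX_LEN,
 * dst_offset = 0}; non-consecutive runs produce one entry each, kept
 * sorted by src_offset.
 */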
/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
static inline int
i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
{
	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t num, i;

	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
			return -EINVAL;
		}
	}

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
	if (num > I40E_MAX_FLXPLD_FIED) {
		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
		return -EINVAL;
	}
	for (i = 0; i < num; i++) {
		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
		    flex_pit[i].src_offset & 0x01) {
			PMD_DRV_LOG(ERR, "flexpayload should be measured"
				" in word");
			return -EINVAL;
		}
		if (i != num - 1)
			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
	}

	return 0;
}
/*
 * i40e_check_fdir_flex_conf - check if the flex payload and mask configuration
 * arguments are valid
 */
static int
i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
{
	const struct rte_eth_flex_payload_cfg *flex_cfg;
	const struct rte_eth_fdir_flex_mask *flex_mask;
	uint16_t mask_tmp;
	uint8_t nb_bitmask;
	uint16_t i, j;
	int ret = 0;

	if (conf == NULL) {
		PMD_DRV_LOG(INFO, "NULL pointer.");
		return -EINVAL;
	}
	/* check flexible payload setting configuration */
	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
		PMD_DRV_LOG(ERR, "invalid number of payload setting.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_payloads; i++) {
		flex_cfg = &conf->flex_set[i];
		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
			PMD_DRV_LOG(ERR, "invalid payload type.");
			return -EINVAL;
		}
		ret = i40e_check_fdir_flex_payload(flex_cfg);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
			return -EINVAL;
		}
	}

	/* check flex mask setting configuration */
	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
		return -EINVAL;
	}
	for (i = 0; i < conf->nb_flexmasks; i++) {
		flex_mask = &conf->flex_mask[i];
		if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
			PMD_DRV_LOG(WARNING, "invalid flow type.");
			return -EINVAL;
		}
		nb_bitmask = 0;
		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
			mask_tmp = I40E_WORD(flex_mask->mask[j],
					     flex_mask->mask[j + 1]);
			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
					PMD_DRV_LOG(ERR, "exceed maximal"
						" number of bitmasks.");
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
/*
 * i40e_set_flx_pld_cfg - configure the rule how bytes stream is extracted
 * as flexible payload
 * @pf: board private structure
 * @cfg: the rule how bytes stream is extracted as flexible payload
 */
static void
i40e_set_flx_pld_cfg(struct i40e_pf *pf,
		     const struct rte_eth_flex_payload_cfg *cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
	uint32_t flx_pit;
	uint16_t num, min_next_off;  /* in words */
	uint8_t field_idx = 0;
	uint8_t layer_idx = 0;
	uint16_t i;

	if (cfg->type == RTE_ETH_L2_PAYLOAD)
		layer_idx = I40E_FLXPLD_L2_IDX;
	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
		layer_idx = I40E_FLXPLD_L3_IDX;
	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
		layer_idx = I40E_FLXPLD_L4_IDX;

	memset(flex_pit, 0, sizeof(flex_pit));
	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);

	for (i = 0; i < RTE_MIN(num, RTE_DIM(flex_pit)); i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		/* record the info in fdir structure */
		pf->fdir.flex_set[field_idx].src_offset =
			flex_pit[i].src_offset / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].size =
			flex_pit[i].size / sizeof(uint16_t);
		pf->fdir.flex_set[field_idx].dst_offset =
			flex_pit[i].dst_offset / sizeof(uint16_t);
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				pf->fdir.flex_set[field_idx].size,
				pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	}
	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
				pf->fdir.flex_set[field_idx].size;

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constraint */
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw,
			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
			flx_pit);
		min_next_off++;
	}
}
/*
 * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
 * @pf: board private structure
 * @pctype: packet classify type
 * @mask_cfg: mask for flexible payload
 */
static void
i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_flex_mask *mask_cfg)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	flex_mask = &pf->fdir.flex_mask[pctype];
	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
		if (mask_tmp != 0x0) {
			flex_mask->word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				/* set bit mask */
				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask->bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
			}
		}
	}
	/* write mask to hw */
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		    I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			   I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			    I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}
}
/*
 * Configure flow director related setting
 */
int
i40e_fdir_configure(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_fdir_flex_conf *conf;
	enum i40e_filter_pctype pctype;
	uint32_t val;
	uint8_t i;
	int ret = 0;

	/*
	 * configuration needs to be done before
	 * flow director filters are added.
	 * If filters exist, flush them.
	 */
	if (i40e_fdir_empty(hw) < 0) {
		ret = i40e_fdir_flush(dev);
		if (ret) {
			PMD_DRV_LOG(ERR, "failed to flush fdir table.");
			return ret;
		}
	}

	/* enable FDIR filter */
	val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);

	i40e_init_flx_pld(pf); /* set flex config to default value */

	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
	ret = i40e_check_fdir_flex_conf(conf);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "invalid configuration arguments.");
		return -EINVAL;
	}
	/* configure flex payload */
	for (i = 0; i < conf->nb_payloads; i++)
		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
	/* configure flex mask */
	for (i = 0; i < conf->nb_flexmasks; i++) {
		pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
	}

	return ret;
}
static inline void
i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
			   unsigned char *raw_pkt)
{
	struct ether_hdr *ether = (struct ether_hdr *)raw_pkt;
	struct ipv4_hdr *ip;
	struct ipv6_hdr *ip6;
	static const uint8_t next_proto[] = {
		[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
		[RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
	};

	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_L2_PAYLOAD:
		ether->ether_type = fdir_input->flow.l2_flow.ether_type;
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr));

		ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
		ip->version_ihl = I40E_FDIR_IP_DEFAULT_VERSION_IHL;
		/* set len to default value */
		ip->total_length = rte_cpu_to_be_16(I40E_FDIR_IP_DEFAULT_LEN);
		ip->time_to_live = I40E_FDIR_IP_DEFAULT_TTL;
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		ip->src_addr = fdir_input->flow.ip4_flow.dst_ip;
		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
		ip->next_proto_id = next_proto[fdir_input->flow_type];
		break;
	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr));

		ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
		ip6->vtc_flow =
			rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW);
		ip6->payload_len =
			rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		ip6->hop_limits = I40E_FDIR_IPv6_DEFAULT_HOP_LIMITS;

		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		rte_memcpy(&(ip6->src_addr),
			   &(fdir_input->flow.ipv6_flow.dst_ip),
			   IPV6_ADDR_LEN);
		rte_memcpy(&(ip6->dst_addr),
			   &(fdir_input->flow.ipv6_flow.src_ip),
			   IPV6_ADDR_LEN);
		ip6->proto = next_proto[fdir_input->flow_type];
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.",
			    fdir_input->flow_type);
		break;
	}
}
/*
 * i40e_fdir_construct_pkt - construct packet based on fields in input
 * @pf: board private structure
 * @fdir_input: input set of the flow director entry
 * @raw_pkt: a packet to be constructed
 */
static int
i40e_fdir_construct_pkt(struct i40e_pf *pf,
			const struct rte_eth_fdir_input *fdir_input,
			unsigned char *raw_pkt)
{
	unsigned char *payload, *ptr;
	struct udp_hdr *udp;
	struct tcp_hdr *tcp;
	struct sctp_hdr *sctp;
	uint8_t size, dst = 0;
	uint8_t i, pit_idx, set_idx = I40E_FLXPLD_L4_IDX; /* use l4 by default*/

	/* fill the ethernet and IP head */
	i40e_fdir_fill_eth_ip_head(fdir_input, raw_pkt);

	/* fill the L4 head */
	switch (fdir_input->flow_type) {
	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
		udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv4_hdr));
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp4_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp4_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv4_hdr));
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->src_port = fdir_input->flow.tcp4_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp4_flow.src_port;
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv4_hdr));
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp4_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp4_flow.src_port;
		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV4:
		payload = raw_pkt + sizeof(struct ether_hdr) +
			  sizeof(struct ipv4_hdr);
		set_idx = I40E_FLXPLD_L3_IDX;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
		udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv6_hdr));
		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		udp->src_port = fdir_input->flow.udp6_flow.dst_port;
		udp->dst_port = fdir_input->flow.udp6_flow.src_port;
		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
		tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv6_hdr));
		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
		tcp->src_port = fdir_input->flow.tcp6_flow.dst_port;
		tcp->dst_port = fdir_input->flow.tcp6_flow.src_port;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
		sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
				sizeof(struct ipv6_hdr));
		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
		/*
		 * The source and destination fields in the transmitted packet
		 * need to be presented in a reversed order with respect
		 * to the expected received packets.
		 */
		sctp->src_port = fdir_input->flow.sctp6_flow.dst_port;
		sctp->dst_port = fdir_input->flow.sctp6_flow.src_port;
		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
		break;

	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
	case RTE_ETH_FLOW_FRAG_IPV6:
		payload = raw_pkt + sizeof(struct ether_hdr) +
			  sizeof(struct ipv6_hdr);
		set_idx = I40E_FLXPLD_L3_IDX;
		break;
	case RTE_ETH_FLOW_L2_PAYLOAD:
		payload = raw_pkt + sizeof(struct ether_hdr);
		/*
		 * ARP packet is a special case on which the payload
		 * starts after the whole ARP header
		 */
		if (fdir_input->flow.l2_flow.ether_type ==
				rte_cpu_to_be_16(ETHER_TYPE_ARP))
			payload += sizeof(struct arp_hdr);
		set_idx = I40E_FLXPLD_L2_IDX;
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown flow type %u.", fdir_input->flow_type);
		return -EINVAL;
	}

	/* fill the flexbytes to payload */
	for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
		pit_idx = set_idx * I40E_MAX_FLXPLD_FIED + i;
		size = pf->fdir.flex_set[pit_idx].size;
		if (size == 0)
			continue;
		dst = pf->fdir.flex_set[pit_idx].dst_offset * sizeof(uint16_t);
		ptr = payload +
			pf->fdir.flex_set[pit_idx].src_offset * sizeof(uint16_t);
		(void)rte_memcpy(ptr,
				 &fdir_input->flow_ext.flexbytes[dst],
				 size * sizeof(uint16_t));
	}

	return 0;
}
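/*
 * With the default flexible payload configuration from i40e_init_flx_pld()
 * (8 words at source offset 0, destination offset 0), the loop above copies
 * the first 16 bytes of fdir_input->flow_ext.flexbytes to the start of the
 * packet payload.
 */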
/* Construct the tx flags */
static inline uint64_t
i40e_build_ctob(uint32_t td_cmd,
		uint32_t td_offset,
		unsigned int size,
		uint32_t td_tag)
{
	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}
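/*
 * i40e_build_ctob() assembles the second quadword of a data descriptor
 * from the command, offset, buffer size and L2 tag fields; the FDIR path
 * below calls it with EOP | RS | DUMMY and I40E_FDIR_PKT_LEN to describe
 * the programming packet.
 */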
/*
 * Check the programming status descriptor in the rx queue.
 * Done after a Flow Director programming descriptor has been submitted
 * on the tx queue.
 */
static inline int
i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
{
	volatile union i40e_rx_desc *rxdp;
	uint64_t qword1;
	uint32_t rx_status;
	uint32_t len, id;
	uint32_t error;
	int ret = 0;

	rxdp = &rxq->rx_ring[rxq->rx_tail];
	qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len);
	rx_status = (qword1 & I40E_RXD_QW1_STATUS_MASK)
			>> I40E_RXD_QW1_STATUS_SHIFT;

	if (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
		len = qword1 >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT;
		id = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
			I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;

		if (len == I40E_RX_PROG_STATUS_DESC_LENGTH &&
		    id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) {
			error = (qword1 &
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
				I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
			if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to add FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else if (error == (0x1 <<
				I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
				PMD_DRV_LOG(ERR, "Failed to delete FDIR filter"
					    " (FD_ID %u): programming status"
					    " reported.",
					    rxdp->wb.qword0.hi_dword.fd_id);
				ret = -1;
			} else
				PMD_DRV_LOG(ERR, "invalid programming status"
					    " reported, error = %u.", error);
		} else
			PMD_DRV_LOG(ERR, "unknown programming status"
				    " reported, len = %d, id = %u.", len, id);
		rxdp->wb.qword1.status_error_len = 0;
		rxq->rx_tail++;
		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
			rxq->rx_tail = 0;
	}

	return ret;
}
/*
 * i40e_add_del_fdir_filter - add or remove a flow director filter.
 * @pf: board private structure
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
static int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
			 const struct rte_eth_fdir_filter *filter,
			 bool add)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	enum i40e_filter_pctype pctype;
	int ret = 0;

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
			" check the mode in fdir_conf.");
		return -ENOTSUP;
	}

	if (!I40E_VALID_FLOW(filter->input.flow_type)) {
		PMD_DRV_LOG(ERR, "invalid flow_type input.");
		return -EINVAL;
	}
	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid queue ID");
		return -EINVAL;
	}
	if (filter->input.flow_ext.is_vf &&
	    filter->input.flow_ext.dst_id >= pf->vf_num) {
		PMD_DRV_LOG(ERR, "Invalid VF ID");
		return -EINVAL;
	}

	memset(pkt, 0, I40E_FDIR_PKT_LEN);

	ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "construct packet for fdir fails.");
		return ret;
	}
	pctype = i40e_flowtype_to_pctype(filter->input.flow_type);
	ret = i40e_fdir_filter_programming(pf, pctype, filter, add);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "fdir programming fails for PCTYPE(%u).",
			    pctype);
		return ret;
	}

	return ret;
}
/*
 * i40e_fdir_filter_programming - Program a flow director filter rule.
 * This is done with a Flow Director programming descriptor, followed by
 * a packet structure that contains the filter fields to match.
 * @pf: board private structure
 * @pctype: packet classify type
 * @filter: fdir filter entry
 * @add: 0 - delete, 1 - add
 */
static int
i40e_fdir_filter_programming(struct i40e_pf *pf,
			     enum i40e_filter_pctype pctype,
			     const struct rte_eth_fdir_filter *filter,
			     bool add)
{
	struct i40e_tx_queue *txq = pf->fdir.txq;
	struct i40e_rx_queue *rxq = pf->fdir.rxq;
	const struct rte_eth_fdir_action *fdir_action = &filter->action;
	volatile struct i40e_tx_desc *txdp;
	volatile struct i40e_filter_program_desc *fdirdp;
	uint32_t td_cmd;
	uint16_t vsi_id, i;
	uint8_t dest;

	PMD_DRV_LOG(INFO, "filling filter programming descriptor.");
	fdirdp = (volatile struct i40e_filter_program_desc *)
			(&(txq->tx_ring[txq->tx_tail]));

	fdirdp->qindex_flex_ptype_vsi =
			rte_cpu_to_le_32((fdir_action->rx_queue <<
					  I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
					  I40E_TXD_FLTR_QW0_QINDEX_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((fdir_action->flex_off <<
					  I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
					  I40E_TXD_FLTR_QW0_FLEXOFF_MASK);

	fdirdp->qindex_flex_ptype_vsi |=
			rte_cpu_to_le_32((pctype <<
					  I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
					  I40E_TXD_FLTR_QW0_PCTYPE_MASK);

	if (filter->input.flow_ext.is_vf)
		vsi_id = pf->vfs[filter->input.flow_ext.dst_id].vsi->vsi_id;
	else
		/* Use LAN VSI Id by default */
		vsi_id = pf->main_vsi->vsi_id;
	fdirdp->qindex_flex_ptype_vsi |=
		rte_cpu_to_le_32(((uint32_t)vsi_id <<
				  I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
				  I40E_TXD_FLTR_QW0_DEST_VSI_MASK);

	fdirdp->dtype_cmd_cntindex =
			rte_cpu_to_le_32(I40E_TX_DESC_DTYPE_FILTER_PROG);

	if (add)
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);
	else
		fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(
				I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
				I40E_TXD_FLTR_QW1_PCMD_SHIFT);

	if (fdir_action->behavior == RTE_ETH_FDIR_REJECT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET;
	else if (fdir_action->behavior == RTE_ETH_FDIR_ACCEPT)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
	else if (fdir_action->behavior == RTE_ETH_FDIR_PASSTHRU)
		dest = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER;
	else {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " unsupported fdir behavior.");
		return -EINVAL;
	}

	fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32((dest <<
				I40E_TXD_FLTR_QW1_DEST_SHIFT) &
				I40E_TXD_FLTR_QW1_DEST_MASK);

	fdirdp->dtype_cmd_cntindex |=
		rte_cpu_to_le_32((fdir_action->report_status <<
				I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
				I40E_TXD_FLTR_QW1_FD_STATUS_MASK);

	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK);
	fdirdp->dtype_cmd_cntindex |=
			rte_cpu_to_le_32((pf->fdir.match_counter_index <<
			I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
			I40E_TXD_FLTR_QW1_CNTINDEX_MASK);

	fdirdp->fd_id = rte_cpu_to_le_32(filter->soft_id);

	PMD_DRV_LOG(INFO, "filling transmit descriptor.");
	txdp = &(txq->tx_ring[txq->tx_tail + 1]);
	txdp->buffer_addr = rte_cpu_to_le_64(pf->fdir.dma_addr);
	td_cmd = I40E_TX_DESC_CMD_EOP |
		 I40E_TX_DESC_CMD_RS |
		 I40E_TX_DESC_CMD_DUMMY;

	txdp->cmd_type_offset_bsz =
		i40e_build_ctob(td_cmd, 0, I40E_FDIR_PKT_LEN, 0);

	txq->tx_tail += 2; /* set 2 descriptors above, fdirdp and txdp */
	if (txq->tx_tail >= txq->nb_tx_desc)
		txq->tx_tail = 0;
	/* Update the tx tail register */
	rte_wmb();
	I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);

	for (i = 0; i < I40E_FDIR_WAIT_COUNT; i++) {
		rte_delay_us(I40E_FDIR_WAIT_INTERVAL_US);
		if ((txdp->cmd_type_offset_bsz &
				rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) ==
				rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
			break;
	}
	if (i >= I40E_FDIR_WAIT_COUNT) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " time out to get DD on tx queue.");
		return -ETIMEDOUT;
	}
	/* totally delay 10 ms to check programming status */
	rte_delay_us((I40E_FDIR_WAIT_COUNT - i) * I40E_FDIR_WAIT_INTERVAL_US);
	if (i40e_check_fdir_programming_status(rxq) < 0) {
		PMD_DRV_LOG(ERR, "Failed to program FDIR filter:"
			    " programming status reported.");
		return -ETIMEDOUT;
	}

	return 0;
}
/*
 * i40e_fdir_flush - clear all filters of Flow Director table
 * @pf: board private structure
 */
static int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t reg;
	uint16_t guarant_cnt, best_cnt;
	uint16_t i;

	I40E_WRITE_REG(hw, I40E_PFQF_CTL_1, I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	I40E_WRITE_FLUSH(hw);

	for (i = 0; i < I40E_FDIR_FLUSH_RETRY; i++) {
		rte_delay_ms(I40E_FDIR_FLUSH_INTERVAL_MS);
		reg = I40E_READ_REG(hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	}
	if (i >= I40E_FDIR_FLUSH_RETRY) {
		PMD_DRV_LOG(ERR, "FD table did not flush, may need more time.");
		return -ETIMEDOUT;
	}
	guarant_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
				 I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
				 I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	best_cnt = (uint16_t)((I40E_READ_REG(hw, I40E_PFQF_FDSTAT) &
			      I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	if (guarant_cnt != 0 || best_cnt != 0) {
		PMD_DRV_LOG(ERR, "Failed to flush FD table.");
		return -ENOSYS;
	} else
		PMD_DRV_LOG(INFO, "FD table Flush success.");
	return 0;
}
static inline void
i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
			    struct rte_eth_flex_payload_cfg *flex_set,
			    uint16_t *num)
{
	struct i40e_fdir_flex_pit *flex_pit;
	struct rte_eth_flex_payload_cfg *ptr = flex_set;
	uint16_t src, dst, size, j, k;
	uint8_t i, layer_idx;

	for (layer_idx = I40E_FLXPLD_L2_IDX;
	     layer_idx <= I40E_FLXPLD_L4_IDX;
	     layer_idx++) {
		if (layer_idx == I40E_FLXPLD_L2_IDX)
			ptr->type = RTE_ETH_L2_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L3_IDX)
			ptr->type = RTE_ETH_L3_PAYLOAD;
		else if (layer_idx == I40E_FLXPLD_L4_IDX)
			ptr->type = RTE_ETH_L4_PAYLOAD;

		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
			flex_pit = &pf->fdir.flex_set[layer_idx *
				I40E_MAX_FLXPLD_FIED + i];
			if (flex_pit->size == 0)
				continue;
			src = flex_pit->src_offset * sizeof(uint16_t);
			dst = flex_pit->dst_offset * sizeof(uint16_t);
			size = flex_pit->size * sizeof(uint16_t);
			for (j = src, k = dst; j < src + size; j++, k++)
				ptr->src_offset[k] = j;
		}
		(*num)++;
		ptr++;
	}
}
static inline void
i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
			     struct rte_eth_fdir_flex_mask *flex_mask,
			     uint16_t *num)
{
	struct i40e_fdir_flex_mask *mask;
	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
	uint16_t flow_type;
	uint8_t i, j;
	uint16_t off_bytes, mask_tmp;

	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
	     i <= I40E_FILTER_PCTYPE_L2_PAYLOAD;
	     i++) {
		mask = &pf->fdir.flex_mask[i];
		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
			continue;
		flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
			} else {
				ptr->mask[j * sizeof(uint16_t)] = 0x0;
				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
			}
		}
		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
			mask_tmp = ~mask->bitmask[j].mask;
			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
		}
		ptr->flow_type = flow_type;
		ptr++;
		(*num)++;
	}
}
/*
 * i40e_fdir_info_get - get information of Flow Director
 * @pf: ethernet device to get info from
 * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled
 *    with the flow director information.
 */
static void
i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint16_t num_flex_set = 0;
	uint16_t num_flex_mask = 0;

	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
		fdir->mode = RTE_FDIR_MODE_PERFECT;
	else
		fdir->mode = RTE_FDIR_MODE_NONE;

	fdir->guarant_spc =
		(uint32_t)hw->func_caps.fd_filters_guaranteed;
	fdir->best_spc =
		(uint32_t)hw->func_caps.fd_filters_best_effort;
	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
	fdir->flex_payload_unit = sizeof(uint16_t);
	fdir->flex_bitmask_unit = sizeof(uint16_t);
	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;

	i40e_fdir_info_get_flex_set(pf,
				fdir->flex_conf.flex_set,
				&num_flex_set);
	i40e_fdir_info_get_flex_mask(pf,
				fdir->flex_conf.flex_mask,
				&num_flex_mask);

	fdir->flex_conf.nb_payloads = num_flex_set;
	fdir->flex_conf.nb_flexmasks = num_flex_mask;
}
/*
 * i40e_fdir_stats_get - get statistics of Flow Director
 * @pf: ethernet device to get info from
 * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled
 *    with the flow director statistics.
 */
static void
i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t fdstat;

	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
	stat->guarant_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
	stat->best_cnt =
		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
}
static int
i40e_fdir_filter_set(struct rte_eth_dev *dev,
		     struct rte_eth_fdir_filter_info *info)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	int ret = 0;

	if (!info) {
		PMD_DRV_LOG(ERR, "Invalid pointer");
		return -EFAULT;
	}

	switch (info->info_type) {
	case RTE_ETH_FDIR_FILTER_INPUT_SET_SELECT:
		ret = i40e_filter_inset_select(hw,
			&(info->info.input_set_conf), RTE_ETH_FILTER_FDIR);
		break;
	default:
		PMD_DRV_LOG(ERR, "FD filter info type (%d) not supported",
			    info->info_type);
		return -EINVAL;
	}

	return ret;
}
/*
 * i40e_fdir_ctrl_func - deal with all operations on flow director.
 * @pf: board private structure
 * @filter_op: operation to be taken
 * @arg: a pointer to specific structure corresponding to the filter_op
 */
int
i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	if ((pf->flags & I40E_FLAG_FDIR) == 0)
		return -ENOTSUP;

	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			TRUE);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = i40e_add_del_fdir_filter(dev,
			(struct rte_eth_fdir_filter *)arg,
			FALSE);
		break;
	case RTE_ETH_FILTER_FLUSH:
		ret = i40e_fdir_flush(dev);
		break;
	case RTE_ETH_FILTER_INFO:
		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
		break;
	case RTE_ETH_FILTER_SET:
		ret = i40e_fdir_filter_set(dev,
			(struct rte_eth_fdir_filter_info *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
		break;
	default:
		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}