diff --git a/lib/librte_pmd_i40e/i40e_fdir.c b/lib/librte_pmd_i40e/i40e_fdir.c
index 143754a454..612377a401 100644
--- a/lib/librte_pmd_i40e/i40e_fdir.c
+++ b/lib/librte_pmd_i40e/i40e_fdir.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -80,9 +80,42 @@
 #define I40E_COUNTER_PF           2
 /* Statistic counter index for one pf */
 #define I40E_COUNTER_INDEX_FDIR(pf_id)   (0 + (pf_id) * I40E_COUNTER_PF)
+#define I40E_MAX_FLX_SOURCE_OFF           480
 #define I40E_FLX_OFFSET_IN_FIELD_VECTOR   50
+
+#define NONUSE_FLX_PIT_DEST_OFF 63
+#define NONUSE_FLX_PIT_FSIZE    1
+#define MK_FLX_PIT(src_offset, fsize, dst_offset) ( \
+	(((src_offset) << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT) & \
+		I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \
+	(((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
+		I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \
+	((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \
+		I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \
+		I40E_PRTQF_FLX_PIT_DEST_OFF_MASK))
+
+#define I40E_FDIR_FLOWS ( \
+	(1 << RTE_ETH_FLOW_FRAG_IPV4) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) | \
+	(1 << RTE_ETH_FLOW_FRAG_IPV6) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) | \
+	(1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER))
+
+#define I40E_FLEX_WORD_MASK(off) (0x80 >> (off))
+
 static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq);
+static int i40e_check_fdir_flex_conf(
+	const struct rte_eth_fdir_flex_conf *conf);
+static void i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+	const struct rte_eth_flex_payload_cfg *cfg);
+static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+		enum i40e_filter_pctype pctype,
+		const struct rte_eth_fdir_flex_mask *mask_cfg);
 static int i40e_fdir_construct_pkt(struct i40e_pf *pf,
 				   const struct rte_eth_fdir_input *fdir_input,
 				   unsigned char *raw_pkt);
@@ -94,6 +127,10 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
 			const struct rte_eth_fdir_filter *filter,
 			bool add);
 static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static void i40e_fdir_info_get(struct rte_eth_dev *dev,
+			struct rte_eth_fdir_info *fdir);
+static void i40e_fdir_stats_get(struct rte_eth_dev *dev,
+			struct rte_eth_fdir_stats *stat);
 
 static int
 i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -159,6 +196,11 @@ i40e_fdir_setup(struct i40e_pf *pf)
 	const struct rte_memzone *mz = NULL;
 	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
 
+	if ((pf->flags & I40E_FLAG_FDIR) == 0) {
+		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
+		return I40E_NOT_SUPPORTED;
+	}
+
 	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: num_filters_guaranteed = %u,"
 			" num_filters_best_effort = %u.",
 			hw->func_caps.fd_filters_guaranteed,
@@ -166,9 +208,8 @@ i40e_fdir_setup(struct i40e_pf *pf)
 
 	vsi = pf->fdir.fdir_vsi;
 	if (vsi) {
-		PMD_DRV_LOG(ERR, "FDIR vsi pointer needs "
-			    "to be null before creation.");
-		return I40E_ERR_BAD_PTR;
+		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
+		return I40E_SUCCESS;
 	}
 	/* make new FDIR VSI */
 	vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, pf->main_vsi, 0);
@@ -265,6 +306,8 @@ i40e_fdir_teardown(struct i40e_pf *pf)
 	struct i40e_vsi *vsi;
 
 	vsi = pf->fdir.fdir_vsi;
+	if (!vsi)
+		return;
 	i40e_switch_tx_queue(hw, vsi->base_queue, FALSE);
 	i40e_switch_rx_queue(hw, vsi->base_queue, FALSE);
 	i40e_dev_rx_queue_release(pf->fdir.rxq);
@@ -334,6 +377,260 @@ i40e_init_flx_pld(struct i40e_pf *pf)
 	}
 }
 
+#define I40E_WORD(hi, lo) (uint16_t)((((hi) << 8) & 0xFF00) | ((lo) & 0xFF))
+
+#define I40E_VALIDATE_FLEX_PIT(flex_pit1, flex_pit2) do { \
+	if ((flex_pit2).src_offset < \
+		(flex_pit1).src_offset + (flex_pit1).size) { \
+		PMD_DRV_LOG(ERR, "src_offset should not be" \
+			" less than the previous offset" \
+			" + previous FSIZE."); \
+		return -EINVAL; \
+	} \
+} while (0)
+
+/*
+ * i40e_srcoff_to_flx_pit - transform the src_offset into flex_pit structure,
+ * and the flex_pit will be sorted by its src_offset value
+ */
+static inline uint16_t
+i40e_srcoff_to_flx_pit(const uint16_t *src_offset,
+			struct i40e_fdir_flex_pit *flex_pit)
+{
+	uint16_t src_tmp, size, num = 0;
+	uint16_t i, k, j = 0;
+
+	while (j < I40E_FDIR_MAX_FLEX_LEN) {
+		size = 1;
+		for (; j < I40E_FDIR_MAX_FLEX_LEN - 1; j++) {
+			if (src_offset[j + 1] == src_offset[j] + 1)
+				size++;
+			else
+				break;
+		}
+		src_tmp = src_offset[j] + 1 - size;
+		/* the flex_pit needs to be sorted by src_offset */
+		for (i = 0; i < num; i++) {
+			if (src_tmp < flex_pit[i].src_offset)
+				break;
+		}
+		/* if insert required, move backward */
+		for (k = num; k > i; k--)
+			flex_pit[k] = flex_pit[k - 1];
+		/* insert */
+		flex_pit[i].dst_offset = j + 1 - size;
+		flex_pit[i].src_offset = src_tmp;
+		flex_pit[i].size = size;
+		j++;
+		num++;
+	}
+	return num;
+}
+
+/* i40e_check_fdir_flex_payload - check flex payload configuration arguments */
+static inline int
+i40e_check_fdir_flex_payload(const struct rte_eth_flex_payload_cfg *flex_cfg)
+{
+	struct i40e_fdir_flex_pit flex_pit[I40E_FDIR_MAX_FLEX_LEN];
+	uint16_t num, i;
+
+	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
+		if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
+			PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
+			return -EINVAL;
+		}
+	}
+
+	memset(flex_pit, 0, sizeof(flex_pit));
+	num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
+	if (num > I40E_MAX_FLXPLD_FIED) {
+		PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
+		return -EINVAL;
+	}
+	for (i = 0; i < num; i++) {
+		if (flex_pit[i].size & 0x01 || flex_pit[i].dst_offset & 0x01 ||
+			flex_pit[i].src_offset & 0x01) {
+			PMD_DRV_LOG(ERR, "flexpayload should be measured"
+				" in words");
+			return -EINVAL;
+		}
+		if (i != num - 1)
+			I40E_VALIDATE_FLEX_PIT(flex_pit[i], flex_pit[i + 1]);
+	}
+	return 0;
+}
+
+/*
+ * i40e_check_fdir_flex_conf - check whether the flex payload and mask
+ * configuration arguments are valid
+ */
+static int
+i40e_check_fdir_flex_conf(const struct rte_eth_fdir_flex_conf *conf)
+{
+	const struct rte_eth_flex_payload_cfg *flex_cfg;
+	const struct rte_eth_fdir_flex_mask *flex_mask;
+	uint16_t mask_tmp;
+	uint8_t nb_bitmask;
+	uint16_t i, j;
+	int ret = 0;
+
+	if (conf == NULL) {
+		PMD_DRV_LOG(INFO, "NULL pointer.");
+		return -EINVAL;
+	}
+	/* check flexible payload setting configuration */
+	if (conf->nb_payloads > RTE_ETH_L4_PAYLOAD) {
+		PMD_DRV_LOG(ERR, "invalid number of payload settings.");
+		return -EINVAL;
+	}
+	for (i = 0; i < conf->nb_payloads; i++) {
+		flex_cfg = &conf->flex_set[i];
+		if (flex_cfg->type > RTE_ETH_L4_PAYLOAD) {
+			PMD_DRV_LOG(ERR, "invalid payload type.");
+			return -EINVAL;
+		}
+		ret = i40e_check_fdir_flex_payload(flex_cfg);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "invalid flex payload arguments.");
+			return -EINVAL;
+		}
+	}
+
+	/* check flex mask setting configuration */
+	if (conf->nb_flexmasks >= RTE_ETH_FLOW_MAX) {
+		PMD_DRV_LOG(ERR, "invalid number of flex masks.");
+		return -EINVAL;
+	}
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		flex_mask = &conf->flex_mask[i];
+		if (!I40E_VALID_FLOW(flex_mask->flow_type)) {
+			PMD_DRV_LOG(WARNING, "invalid flow type.");
+			return -EINVAL;
+		}
+		nb_bitmask = 0;
+		for (j = 0; j < I40E_FDIR_MAX_FLEX_LEN; j += sizeof(uint16_t)) {
+			mask_tmp = I40E_WORD(flex_mask->mask[j],
+					flex_mask->mask[j + 1]);
+			if (mask_tmp != 0x0 && mask_tmp != UINT16_MAX) {
+				nb_bitmask++;
+				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD) {
+					PMD_DRV_LOG(ERR, "exceeds maximal"
+						" number of bitmasks.");
+					return -EINVAL;
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+ * i40e_set_flx_pld_cfg - configure the rule of how a byte stream is
+ * extracted as flexible payload
+ * @pf: board private structure
+ * @cfg: the rule of how the byte stream is extracted as flexible payload
+ */
+static void
+i40e_set_flx_pld_cfg(struct i40e_pf *pf,
+			const struct rte_eth_flex_payload_cfg *cfg)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_flex_pit flex_pit[I40E_MAX_FLXPLD_FIED];
+	uint32_t flx_pit;
+	uint16_t num, min_next_off;  /* in words */
+	uint8_t field_idx = 0;
+	uint8_t layer_idx = 0;
+	uint16_t i;
+
+	if (cfg->type == RTE_ETH_L2_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L2_IDX;
+	else if (cfg->type == RTE_ETH_L3_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L3_IDX;
+	else if (cfg->type == RTE_ETH_L4_PAYLOAD)
+		layer_idx = I40E_FLXPLD_L4_IDX;
+
+	memset(flex_pit, 0, sizeof(flex_pit));
+	num = i40e_srcoff_to_flx_pit(cfg->src_offset, flex_pit);
+
+	for (i = 0; i < num; i++) {
+		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+		/* record the info in fdir structure */
+		pf->fdir.flex_set[field_idx].src_offset =
+			flex_pit[i].src_offset / sizeof(uint16_t);
+		pf->fdir.flex_set[field_idx].size =
+			flex_pit[i].size / sizeof(uint16_t);
+		pf->fdir.flex_set[field_idx].dst_offset =
+			flex_pit[i].dst_offset / sizeof(uint16_t);
+		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+				pf->fdir.flex_set[field_idx].size,
+				pf->fdir.flex_set[field_idx].dst_offset);
+
+		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+	}
+	min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+				pf->fdir.flex_set[field_idx].size;
+
+	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+		/* set the non-used registers obeying the register's constraints */
+		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+				NONUSE_FLX_PIT_DEST_OFF);
+		I40E_WRITE_REG(hw,
+			I40E_PRTQF_FLX_PIT(layer_idx * I40E_MAX_FLXPLD_FIED + i),
+			flx_pit);
+		min_next_off++;
+	}
+}
+
+/*
+ * i40e_set_flex_mask_on_pctype - configure the mask on flexible payload
+ * @pf: board private structure
+ * @pctype: packet classify type
+ * @mask_cfg: mask for flexible payload
+ */
+static void
+i40e_set_flex_mask_on_pctype(struct i40e_pf *pf,
+		enum i40e_filter_pctype pctype,
+		const struct rte_eth_fdir_flex_mask *mask_cfg)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_flex_mask *flex_mask;
+	uint32_t flxinset, fd_mask;
+	uint16_t mask_tmp;
+	uint8_t i, nb_bitmask = 0;
+
+	flex_mask = &pf->fdir.flex_mask[pctype];
+	memset(flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+		mask_tmp = I40E_WORD(mask_cfg->mask[i], mask_cfg->mask[i + 1]);
+		if (mask_tmp != 0x0) {
+			flex_mask->word_mask |=
+				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+			if (mask_tmp != UINT16_MAX) {
+				/* set bit mask */
+				flex_mask->bitmask[nb_bitmask].mask = ~mask_tmp;
+				flex_mask->bitmask[nb_bitmask].offset =
+					i / sizeof(uint16_t);
+				nb_bitmask++;
+			}
+		}
+	}
+	/* write mask to hw */
+	flxinset = (flex_mask->word_mask <<
+		I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+	I40E_WRITE_REG(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+	for (i = 0; i < nb_bitmask; i++) {
+		fd_mask = (flex_mask->bitmask[i].mask <<
+			I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+			I40E_PRTQF_FD_MSK_MASK_MASK;
+		fd_mask |= ((flex_mask->bitmask[i].offset +
+			I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+			I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+			I40E_PRTQF_FD_MSK_OFFSET_MASK;
+		I40E_WRITE_REG(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+	}
+}
+
 /*
  * Configure flow director related setting
  */
@@ -342,7 +639,10 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_eth_fdir_flex_conf *conf;
+	enum i40e_filter_pctype pctype;
 	uint32_t val;
+	uint8_t i;
 	int ret = 0;
 
 	/*
@@ -358,19 +658,26 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
 		}
 	}
 
+	/* enable FDIR filter */
 	val = I40E_READ_REG(hw, I40E_PFQF_CTL_0);
-	if ((pf->flags & I40E_FLAG_FDIR) &&
-		dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT) {
-		/* enable FDIR filter */
-		val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
-		I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
+	val |= I40E_PFQF_CTL_0_FD_ENA_MASK;
+	I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
+
+	i40e_init_flx_pld(pf); /* set flex config to default value */
-		i40e_init_flx_pld(pf); /* set flex config to default value */
-	} else {
-		/* disable FDIR filter */
-		val &= ~I40E_PFQF_CTL_0_FD_ENA_MASK;
-		I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, val);
-		pf->flags &= ~I40E_FLAG_FDIR;
+	conf = &dev->data->dev_conf.fdir_conf.flex_conf;
+	ret = i40e_check_fdir_flex_conf(conf);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "invalid configuration arguments.");
+		return -EINVAL;
+	}
+	/* configure flex payload */
+	for (i = 0; i < conf->nb_payloads; i++)
+		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+	/* configure flex mask */
+	for (i = 0; i < conf->nb_flexmasks; i++) {
+		pctype = i40e_flowtype_to_pctype(conf->flex_mask[i].flow_type);
+		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
 	}
 
 	return ret;
@@ -384,24 +691,24 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
 	static const uint8_t next_proto[] = {
-		[RTE_ETH_FLOW_TYPE_UDPV4] = IPPROTO_UDP,
-		[RTE_ETH_FLOW_TYPE_TCPV4] = IPPROTO_TCP,
-		[RTE_ETH_FLOW_TYPE_SCTPV4] = IPPROTO_SCTP,
-		[RTE_ETH_FLOW_TYPE_IPV4_OTHER] = IPPROTO_IP,
-		[RTE_ETH_FLOW_TYPE_FRAG_IPV4] = IPPROTO_IP,
-		[RTE_ETH_FLOW_TYPE_UDPV6] = IPPROTO_UDP,
-		[RTE_ETH_FLOW_TYPE_TCPV6] = IPPROTO_TCP,
-		[RTE_ETH_FLOW_TYPE_SCTPV6] = IPPROTO_SCTP,
-		[RTE_ETH_FLOW_TYPE_IPV6_OTHER] = IPPROTO_NONE,
-		[RTE_ETH_FLOW_TYPE_FRAG_IPV6] = IPPROTO_NONE,
+		[RTE_ETH_FLOW_FRAG_IPV4] = IPPROTO_IP,
+		[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+		[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+		[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] = IPPROTO_SCTP,
+		[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] = IPPROTO_IP,
+		[RTE_ETH_FLOW_FRAG_IPV6] = IPPROTO_NONE,
+		[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+		[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+		[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] = IPPROTO_SCTP,
+		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
 	};
 
 	switch (fdir_input->flow_type) {
-	case RTE_ETH_FLOW_TYPE_UDPV4:
-	case RTE_ETH_FLOW_TYPE_TCPV4:
-	case RTE_ETH_FLOW_TYPE_SCTPV4:
-	case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
-	case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+	case RTE_ETH_FLOW_FRAG_IPV4:
 		ip = (struct ipv4_hdr *)(raw_pkt + sizeof(struct ether_hdr));
 
 		ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
@@ -418,11 +725,11 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
 		ip->dst_addr = fdir_input->flow.ip4_flow.src_ip;
 		ip->next_proto_id = next_proto[fdir_input->flow_type];
 		break;
-	case RTE_ETH_FLOW_TYPE_UDPV6:
-	case RTE_ETH_FLOW_TYPE_TCPV6:
-	case RTE_ETH_FLOW_TYPE_SCTPV6:
-	case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
-	case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+	case RTE_ETH_FLOW_FRAG_IPV6:
 		ip6 = (struct ipv6_hdr *)(raw_pkt + sizeof(struct ether_hdr));
 
 		ether->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
@@ -438,10 +745,10 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
 		 * to the expected received packets.
 		 */
 		rte_memcpy(&(ip6->src_addr),
-			   &(fdir_input->flow.ip6_flow.dst_ip),
+			   &(fdir_input->flow.ipv6_flow.dst_ip),
 			   IPV6_ADDR_LEN);
 		rte_memcpy(&(ip6->dst_addr),
-			   &(fdir_input->flow.ip6_flow.src_ip),
+			   &(fdir_input->flow.ipv6_flow.src_ip),
 			   IPV6_ADDR_LEN);
 		ip6->proto = next_proto[fdir_input->flow_type];
 		break;
@@ -476,7 +783,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 
 	/* fill the L4 head */
 	switch (fdir_input->flow_type) {
-	case RTE_ETH_FLOW_TYPE_UDPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
 		udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 				sizeof(struct ipv4_hdr));
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
@@ -490,7 +797,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_UDP_DEFAULT_LEN);
 		break;
 
-	case RTE_ETH_FLOW_TYPE_TCPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
 		tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 					 sizeof(struct ipv4_hdr));
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -504,21 +811,21 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		break;
 
-	case RTE_ETH_FLOW_TYPE_SCTPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
 		sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 					   sizeof(struct ipv4_hdr));
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		sctp->tag = fdir_input->flow.sctp4_flow.verify_tag;
 		break;
 
-	case RTE_ETH_FLOW_TYPE_IPV4_OTHER:
-	case RTE_ETH_FLOW_TYPE_FRAG_IPV4:
+	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+	case RTE_ETH_FLOW_FRAG_IPV4:
 		payload = raw_pkt + sizeof(struct ether_hdr) +
 			  sizeof(struct ipv4_hdr);
 		set_idx = I40E_FLXPLD_L3_IDX;
 		break;
 
-	case RTE_ETH_FLOW_TYPE_UDPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
 		udp = (struct udp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 					 sizeof(struct ipv6_hdr));
 		payload = (unsigned char *)udp + sizeof(struct udp_hdr);
@@ -532,7 +839,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 		udp->dgram_len = rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
 		break;
 
-	case RTE_ETH_FLOW_TYPE_TCPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
 		tcp = (struct tcp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 					 sizeof(struct ipv6_hdr));
 		payload = (unsigned char *)tcp + sizeof(struct tcp_hdr);
@@ -546,15 +853,15 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 		tcp->data_off = I40E_FDIR_TCP_DEFAULT_DATAOFF;
 		tcp->dst_port = fdir_input->flow.udp6_flow.src_port;
 		break;
 
-	case RTE_ETH_FLOW_TYPE_SCTPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
 		sctp = (struct sctp_hdr *)(raw_pkt + sizeof(struct ether_hdr) +
 					   sizeof(struct ipv6_hdr));
 		payload = (unsigned char *)sctp + sizeof(struct sctp_hdr);
 		sctp->tag = fdir_input->flow.sctp6_flow.verify_tag;
 		break;
 
-	case RTE_ETH_FLOW_TYPE_IPV6_OTHER:
-	case RTE_ETH_FLOW_TYPE_FRAG_IPV6:
+	case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+	case RTE_ETH_FLOW_FRAG_IPV6:
 		payload = raw_pkt + sizeof(struct ether_hdr) +
 			  sizeof(struct ipv6_hdr);
 		set_idx = I40E_FLXPLD_L3_IDX;
@@ -669,11 +976,13 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
 	enum i40e_filter_pctype pctype;
 	int ret = 0;
 
-	if (!(pf->flags & I40E_FLAG_FDIR)) {
-		PMD_DRV_LOG(ERR, "FDIR is not enabled.");
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+		PMD_DRV_LOG(ERR, "FDIR is not enabled, please"
+			" check the mode in fdir_conf.");
 		return -ENOTSUP;
 	}
-	if (!I40E_VALID_FLOW_TYPE(filter->input.flow_type)) {
+
+	if (!I40E_VALID_FLOW(filter->input.flow_type)) {
 		PMD_DRV_LOG(ERR, "invalid flow_type input.");
 		return -EINVAL;
 	}
@@ -861,6 +1170,145 @@ i40e_fdir_flush(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static inline void
+i40e_fdir_info_get_flex_set(struct i40e_pf *pf,
+			struct rte_eth_flex_payload_cfg *flex_set,
+			uint16_t *num)
+{
+	struct i40e_fdir_flex_pit *flex_pit;
+	struct rte_eth_flex_payload_cfg *ptr = flex_set;
+	uint16_t src, dst, size, j, k;
+	uint8_t i, layer_idx;
+
+	for (layer_idx = I40E_FLXPLD_L2_IDX;
+	     layer_idx <= I40E_FLXPLD_L4_IDX;
+	     layer_idx++) {
+		if (layer_idx == I40E_FLXPLD_L2_IDX)
+			ptr->type = RTE_ETH_L2_PAYLOAD;
+		else if (layer_idx == I40E_FLXPLD_L3_IDX)
+			ptr->type = RTE_ETH_L3_PAYLOAD;
+		else if (layer_idx == I40E_FLXPLD_L4_IDX)
+			ptr->type = RTE_ETH_L4_PAYLOAD;
+
+		for (i = 0; i < I40E_MAX_FLXPLD_FIED; i++) {
+			flex_pit = &pf->fdir.flex_set[layer_idx *
+				I40E_MAX_FLXPLD_FIED + i];
+			if (flex_pit->size == 0)
+				continue;
+			src = flex_pit->src_offset * sizeof(uint16_t);
+			dst = flex_pit->dst_offset * sizeof(uint16_t);
+			size = flex_pit->size * sizeof(uint16_t);
+			for (j = src, k = dst; j < src + size; j++, k++)
+				ptr->src_offset[k] = j;
+		}
+		(*num)++;
+		ptr++;
+	}
+}
+
+static inline void
+i40e_fdir_info_get_flex_mask(struct i40e_pf *pf,
+			struct rte_eth_fdir_flex_mask *flex_mask,
+			uint16_t *num)
+{
+	struct i40e_fdir_flex_mask *mask;
+	struct rte_eth_fdir_flex_mask *ptr = flex_mask;
+	uint16_t flow_type;
+	uint8_t i, j;
+	uint16_t off_bytes, mask_tmp;
+
+	for (i = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+	     i <= I40E_FILTER_PCTYPE_FRAG_IPV6;
+	     i++) {
+		mask = &pf->fdir.flex_mask[i];
+		if (!I40E_VALID_PCTYPE((enum i40e_filter_pctype)i))
+			continue;
+		flow_type = i40e_pctype_to_flowtype((enum i40e_filter_pctype)i);
+		for (j = 0; j < I40E_FDIR_MAX_FLEXWORD_NUM; j++) {
+			if (mask->word_mask & I40E_FLEX_WORD_MASK(j)) {
+				ptr->mask[j * sizeof(uint16_t)] = UINT8_MAX;
+				ptr->mask[j * sizeof(uint16_t) + 1] = UINT8_MAX;
+			} else {
+				ptr->mask[j * sizeof(uint16_t)] = 0x0;
+				ptr->mask[j * sizeof(uint16_t) + 1] = 0x0;
+			}
+		}
+		for (j = 0; j < I40E_FDIR_BITMASK_NUM_WORD; j++) {
+			off_bytes = mask->bitmask[j].offset * sizeof(uint16_t);
+			mask_tmp = ~mask->bitmask[j].mask;
+			ptr->mask[off_bytes] &= I40E_HI_BYTE(mask_tmp);
+			ptr->mask[off_bytes + 1] &= I40E_LO_BYTE(mask_tmp);
+		}
+		ptr->flow_type = flow_type;
+		ptr++;
+		(*num)++;
+	}
+}
+
+/*
+ * i40e_fdir_info_get - get information of Flow Director
+ * @dev: ethernet device to get info from
+ * @fdir: a pointer to a structure of type *rte_eth_fdir_info* to be filled
+ *    with the flow director information.
+ */
+static void
+i40e_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint16_t num_flex_set = 0;
+	uint16_t num_flex_mask = 0;
+
+	if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_PERFECT)
+		fdir->mode = RTE_FDIR_MODE_PERFECT;
+	else
+		fdir->mode = RTE_FDIR_MODE_NONE;
+
+	fdir->guarant_spc =
+		(uint32_t)hw->func_caps.fd_filters_guaranteed;
+	fdir->best_spc =
+		(uint32_t)hw->func_caps.fd_filters_best_effort;
+	fdir->max_flexpayload = I40E_FDIR_MAX_FLEX_LEN;
+	fdir->flow_types_mask[0] = I40E_FDIR_FLOWS;
+	fdir->flex_payload_unit = sizeof(uint16_t);
+	fdir->flex_bitmask_unit = sizeof(uint16_t);
+	fdir->max_flex_payload_segment_num = I40E_MAX_FLXPLD_FIED;
+	fdir->flex_payload_limit = I40E_MAX_FLX_SOURCE_OFF;
+	fdir->max_flex_bitmask_num = I40E_FDIR_BITMASK_NUM_WORD;
+
+	i40e_fdir_info_get_flex_set(pf,
+				fdir->flex_conf.flex_set,
+				&num_flex_set);
+	i40e_fdir_info_get_flex_mask(pf,
+				fdir->flex_conf.flex_mask,
+				&num_flex_mask);
+
+	fdir->flex_conf.nb_payloads = num_flex_set;
+	fdir->flex_conf.nb_flexmasks = num_flex_mask;
+}
+
+/*
+ * i40e_fdir_stats_get - get statistics of Flow Director
+ * @dev: ethernet device to get info from
+ * @stat: a pointer to a structure of type *rte_eth_fdir_stats* to be filled
+ *    with the flow director statistics.
+ */
+static void
+i40e_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *stat)
+{
+	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t fdstat;
+
+	fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+	stat->guarant_cnt =
+		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+			   I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+	stat->best_cnt =
+		(uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+			   I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+}
+
 /*
  * i40e_fdir_ctrl_func - deal with all operations on flow director.
  * @pf: board private structure
@@ -875,11 +1323,11 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	int ret = 0;
 
-	if (filter_op == RTE_ETH_FILTER_NOP) {
-		if (!(pf->flags & I40E_FLAG_FDIR))
-			ret = -ENOTSUP;
-		return ret;
-	}
+	if ((pf->flags & I40E_FLAG_FDIR) == 0)
+		return -ENOTSUP;
+
+	if (filter_op == RTE_ETH_FILTER_NOP)
+		return 0;
 
 	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
 		return -EINVAL;
@@ -898,6 +1346,12 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_FLUSH:
 		ret = i40e_fdir_flush(dev);
 		break;
+	case RTE_ETH_FILTER_INFO:
+		i40e_fdir_info_get(dev, (struct rte_eth_fdir_info *)arg);
+		break;
+	case RTE_ETH_FILTER_STATS:
+		i40e_fdir_stats_get(dev, (struct rte_eth_fdir_stats *)arg);
+		break;
 	default:
 		PMD_DRV_LOG(ERR, "unknown operation %u.", filter_op);
 		ret = -EINVAL;
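The two new RTE_ETH_FILTER_INFO and RTE_ETH_FILTER_STATS cases in i40e_fdir_ctrl_func are reached through the generic ethdev filter-control entry point of this release. A minimal usage sketch follows, assuming an already initialized i40e port; the port id, function name and printf reporting are illustrative, not part of the patch.

/*
 * Sketch: query Flow Director capabilities and counters from an
 * application, assuming port_id refers to an initialized i40e port.
 */
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>

static void
show_fdir_info_and_stats(uint8_t port_id)
{
	struct rte_eth_fdir_info info;
	struct rte_eth_fdir_stats stats;

	memset(&info, 0, sizeof(info));
	memset(&stats, 0, sizeof(stats));

	/* dispatched to i40e_fdir_ctrl_func() with RTE_ETH_FILTER_INFO */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				    RTE_ETH_FILTER_INFO, &info) == 0)
		printf("mode %u: guaranteed space %u, best-effort space %u\n",
		       (unsigned)info.mode, info.guarant_spc, info.best_spc);

	/* dispatched to i40e_fdir_ctrl_func() with RTE_ETH_FILTER_STATS */
	if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				    RTE_ETH_FILTER_STATS, &stats) == 0)
		printf("guaranteed filters in use %u, best-effort in use %u\n",
		       stats.guarant_cnt, stats.best_cnt);
}

The guarant_spc/best_spc fields report the capacities read from hw->func_caps in i40e_fdir_info_get, while guarant_cnt/best_cnt come from the I40E_PFQF_FDSTAT register read in i40e_fdir_stats_get.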