X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fqede%2Fqede_filter.c;h=bad55740a795c8d523c9e8fb850eb286b5d71649;hb=7e9c8558428e1f437611e1d0398610701e7f8a77;hp=bdf2885821fbb9e00379b8427567be0a0b06a1d7;hpb=f5765f66f9bb95cda9ab33e4b5078704fad77331;p=dpdk.git diff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c index bdf2885821..bad55740a7 100644 --- a/drivers/net/qede/qede_filter.c +++ b/drivers/net/qede/qede_filter.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "qede_ethdev.h" @@ -220,7 +221,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev, case RTE_ETH_FLOW_NONFRAG_IPV4_TCP: case RTE_ETH_FLOW_NONFRAG_IPV4_UDP: /* fill the common ip header */ - arfs->tuple.eth_proto = ETHER_TYPE_IPv4; + arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4; arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip; arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip; arfs->tuple.ip_proto = next_proto[input->flow_type]; @@ -236,7 +237,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev, break; case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: - arfs->tuple.eth_proto = ETHER_TYPE_IPv6; + arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6; arfs->tuple.ip_proto = next_proto[input->flow_type]; rte_memcpy(arfs->tuple.dst_ipv6, &input->flow.ipv6_flow.dst_ip, @@ -271,6 +272,7 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev, { struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct ecore_ntuple_filter_params params; char mz_name[RTE_MEMZONE_NAMESIZE] = {0}; struct qede_arfs_entry *tmp = NULL; const struct rte_memzone *mz; @@ -289,7 +291,7 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev, /* soft_id could have been used as memzone string, but soft_id is * not currently used so it has no significance. 
 	 */
-	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+	snprintf(mz_name, sizeof(mz_name), "%lx",
 		(unsigned long)rte_get_timer_cycles());
 	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
 					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
@@ -343,12 +345,18 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
 		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
 					  &qdev->arfs_info.arfs);
 	}
+
+	memset(&params, 0, sizeof(params));
+	params.addr = (dma_addr_t)mz->iova;
+	params.length = pkt_len;
+	params.qid = arfs->rx_queue;
+	params.vport_id = 0;
+	params.b_is_add = add;
+	params.b_is_drop = arfs->is_drop;
+
 	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
 	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
-					       (dma_addr_t)mz->iova,
-					       pkt_len,
-					       arfs->rx_queue,
-					       0, add);
+					       &params);
 	if (rc == ECORE_SUCCESS) {
 		if (add) {
 			arfs->pkt_len = pkt_len;
@@ -430,7 +438,7 @@ qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
 		DP_ERR(edev, "invalid queue number %u\n",
 		       fdir->action.rx_queue);
 		return -EINVAL;
@@ -456,57 +464,57 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint16_t *ether_type;
 	uint8_t *raw_pkt;
-	struct ipv4_hdr *ip;
-	struct ipv6_hdr *ip6;
-	struct udp_hdr *udp;
-	struct tcp_hdr *tcp;
+	struct rte_ipv4_hdr *ip;
+	struct rte_ipv6_hdr *ip6;
+	struct rte_udp_hdr *udp;
+	struct rte_tcp_hdr *tcp;
 	uint16_t len;
 
 	raw_pkt = (uint8_t *)buff;
 
-	len = 2 * sizeof(struct ether_addr);
-	raw_pkt += 2 * sizeof(struct ether_addr);
+	len = 2 * sizeof(struct rte_ether_addr);
+	raw_pkt += 2 * sizeof(struct rte_ether_addr);
 	ether_type = (uint16_t *)raw_pkt;
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
 
 	switch (arfs->tuple.eth_proto) {
-	case ETHER_TYPE_IPv4:
-		ip = (struct ipv4_hdr *)raw_pkt;
+	case RTE_ETHER_TYPE_IPV4:
+		ip = (struct rte_ipv4_hdr *)raw_pkt;
 		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
-		ip->total_length = sizeof(struct ipv4_hdr);
+		ip->total_length = sizeof(struct rte_ipv4_hdr);
 		ip->next_proto_id = arfs->tuple.ip_proto;
 		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
 		ip->dst_addr = arfs->tuple.dst_ipv4;
 		ip->src_addr = arfs->tuple.src_ipv4;
-		len += sizeof(struct ipv4_hdr);
+		len += sizeof(struct rte_ipv4_hdr);
 		params->ipv4 = true;
 
 		raw_pkt = (uint8_t *)buff;
 		/* UDP */
 		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
-			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp = (struct rte_udp_hdr *)(raw_pkt + len);
 			udp->dst_port = arfs->tuple.dst_port;
 			udp->src_port = arfs->tuple.src_port;
-			udp->dgram_len = sizeof(struct udp_hdr);
-			len += sizeof(struct udp_hdr);
+			udp->dgram_len = sizeof(struct rte_udp_hdr);
+			len += sizeof(struct rte_udp_hdr);
 			/* adjust ip total_length */
-			ip->total_length += sizeof(struct udp_hdr);
+			ip->total_length += sizeof(struct rte_udp_hdr);
 			params->udp = true;
 		} else { /* TCP */
-			tcp = (struct tcp_hdr *)(raw_pkt + len);
+			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
 			tcp->src_port = arfs->tuple.src_port;
 			tcp->dst_port = arfs->tuple.dst_port;
 			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
-			len += sizeof(struct tcp_hdr);
+			len += sizeof(struct rte_tcp_hdr);
 			/* adjust ip total_length */
-			ip->total_length += sizeof(struct tcp_hdr);
+			ip->total_length += sizeof(struct rte_tcp_hdr);
 			params->tcp = true;
 		}
 		break;
-	case ETHER_TYPE_IPv6:
-		ip6 = (struct ipv6_hdr *)raw_pkt;
+	case RTE_ETHER_TYPE_IPV6:
+		ip6 = (struct rte_ipv6_hdr *)raw_pkt;
ip6->proto = arfs->tuple.ip_proto; ip6->vtc_flow = rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW); @@ -515,23 +523,23 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev, IPV6_ADDR_LEN); rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6, IPV6_ADDR_LEN); - len += sizeof(struct ipv6_hdr); + len += sizeof(struct rte_ipv6_hdr); params->ipv6 = true; raw_pkt = (uint8_t *)buff; /* UDP */ if (arfs->tuple.ip_proto == IPPROTO_UDP) { - udp = (struct udp_hdr *)(raw_pkt + len); + udp = (struct rte_udp_hdr *)(raw_pkt + len); udp->src_port = arfs->tuple.src_port; udp->dst_port = arfs->tuple.dst_port; - len += sizeof(struct udp_hdr); + len += sizeof(struct rte_udp_hdr); params->udp = true; } else { /* TCP */ - tcp = (struct tcp_hdr *)(raw_pkt + len); + tcp = (struct rte_tcp_hdr *)(raw_pkt + len); tcp->src_port = arfs->tuple.src_port; tcp->dst_port = arfs->tuple.dst_port; tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF; - len += sizeof(struct tcp_hdr); + len += sizeof(struct rte_tcp_hdr); params->tcp = true; } break; @@ -991,25 +999,25 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, break; case ECORE_FILTER_MAC: memcpy(ucast->mac, conf->outer_mac.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); break; case ECORE_FILTER_INNER_MAC: memcpy(ucast->mac, conf->inner_mac.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); break; case ECORE_FILTER_MAC_VNI_PAIR: memcpy(ucast->mac, conf->outer_mac.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); ucast->vni = conf->tenant_id; break; case ECORE_FILTER_INNER_MAC_VNI_PAIR: memcpy(ucast->mac, conf->inner_mac.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); ucast->vni = conf->tenant_id; break; case ECORE_FILTER_INNER_PAIR: memcpy(ucast->mac, conf->inner_mac.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); ucast->vlan = conf->inner_vlan; break; default: @@ -1022,7 +1030,7 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast, static int _qede_tunn_filter_config(struct rte_eth_dev *eth_dev, const struct rte_eth_tunnel_filter_conf *conf, - __attribute__((unused)) enum rte_filter_op filter_op, + __rte_unused enum rte_filter_op filter_op, enum ecore_tunn_clss *clss, bool add) { @@ -1159,6 +1167,331 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev, return 0; } +static int +qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + struct rte_flow_error *error) +{ + if (attr == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ATTR, NULL, + "NULL attribute"); + return -rte_errno; + } + + if (attr->group != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr, + "Groups are not supported"); + return -rte_errno; + } + + if (attr->priority != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr, + "Priorities are not supported"); + return -rte_errno; + } + + if (attr->egress != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr, + "Egress is not supported"); + return -rte_errno; + } + + if (attr->transfer != 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr, + "Transfer is not supported"); + return -rte_errno; + } + + if (attr->ingress == 0) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr, + "Only ingress is supported"); + return -rte_errno; + } + + return 0; +} + +static int +qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev, + const struct rte_flow_item pattern[], + struct rte_flow_error *error, + 
struct rte_flow *flow) +{ + bool l3 = false, l4 = false; + + if (pattern == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL, + "NULL pattern"); + return -rte_errno; + } + + for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) { + if (!pattern->spec) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item spec not defined"); + return -rte_errno; + } + + if (pattern->last) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item last not supported"); + return -rte_errno; + } + + if (pattern->mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item mask not supported"); + return -rte_errno; + } + + /* Below validation is only for 4 tuple flow + * (GFT_PROFILE_TYPE_4_TUPLE) + * - src and dst L3 address (IPv4 or IPv6) + * - src and dst L4 port (TCP or UDP) + */ + + switch (pattern->type) { + case RTE_FLOW_ITEM_TYPE_IPV4: + l3 = true; + + if (flow) { + const struct rte_flow_item_ipv4 *spec; + + spec = pattern->spec; + flow->entry.tuple.src_ipv4 = spec->hdr.src_addr; + flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr; + flow->entry.tuple.eth_proto = + RTE_ETHER_TYPE_IPV4; + } + break; + + case RTE_FLOW_ITEM_TYPE_IPV6: + l3 = true; + + if (flow) { + const struct rte_flow_item_ipv6 *spec; + + spec = pattern->spec; + rte_memcpy(flow->entry.tuple.src_ipv6, + spec->hdr.src_addr, + IPV6_ADDR_LEN); + rte_memcpy(flow->entry.tuple.dst_ipv6, + spec->hdr.dst_addr, + IPV6_ADDR_LEN); + flow->entry.tuple.eth_proto = + RTE_ETHER_TYPE_IPV6; + } + break; + + case RTE_FLOW_ITEM_TYPE_UDP: + l4 = true; + + if (flow) { + const struct rte_flow_item_udp *spec; + + spec = pattern->spec; + flow->entry.tuple.src_port = + spec->hdr.src_port; + flow->entry.tuple.dst_port = + spec->hdr.dst_port; + flow->entry.tuple.ip_proto = IPPROTO_UDP; + } + break; + + case RTE_FLOW_ITEM_TYPE_TCP: + l4 = true; + + if (flow) { + const struct rte_flow_item_tcp *spec; + + spec = pattern->spec; + flow->entry.tuple.src_port = + spec->hdr.src_port; + flow->entry.tuple.dst_port = + spec->hdr.dst_port; + flow->entry.tuple.ip_proto = IPPROTO_TCP; + } + + break; + default: + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Only 4 tuple (IPV4, IPV6, UDP and TCP) item types supported"); + return -rte_errno; + } + } + + if (!(l3 && l4)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + pattern, + "Item types need to have both L3 and L4 protocols"); + return -rte_errno; + } + + return 0; +} + +static int +qede_flow_parse_actions(struct rte_eth_dev *dev, + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) +{ + const struct rte_flow_action_queue *queue; + + if (actions == NULL) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL, + "NULL actions"); + return -rte_errno; + } + + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { + switch (actions->type) { + case RTE_FLOW_ACTION_TYPE_QUEUE: + queue = actions->conf; + + if (queue->index >= QEDE_RSS_COUNT(dev)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Bad QUEUE action"); + return -rte_errno; + } + + if (flow) + flow->entry.rx_queue = queue->index; + + break; + case RTE_FLOW_ACTION_TYPE_DROP: + if (flow) + flow->entry.is_drop = true; + break; + default: + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported"); + 
return -rte_errno; + } + } + + return 0; +} + +static int +qede_flow_parse(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error, + struct rte_flow *flow) + +{ + int rc = 0; + + rc = qede_flow_validate_attr(dev, attr, error); + if (rc) + return rc; + + /* parse and validate item pattern and actions. + * Given item list and actions will be translate to qede PMD + * specific arfs structure. + */ + rc = qede_flow_parse_pattern(dev, patterns, error, flow); + if (rc) + return rc; + + rc = qede_flow_parse_actions(dev, actions, error, flow); + + return rc; +} + +static int +qede_flow_validate(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item patterns[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + return qede_flow_parse(dev, attr, patterns, actions, error, NULL); +} + +static struct rte_flow * +qede_flow_create(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_item pattern[], + const struct rte_flow_action actions[], + struct rte_flow_error *error) +{ + struct rte_flow *flow = NULL; + int rc; + + flow = rte_zmalloc("qede_rte_flow", sizeof(*flow), 0); + if (flow == NULL) { + rte_flow_error_set(error, ENOMEM, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Failed to allocate memory"); + return NULL; + } + + rc = qede_flow_parse(dev, attr, pattern, actions, error, flow); + if (rc < 0) { + rte_free(flow); + return NULL; + } + + rc = qede_config_arfs_filter(dev, &flow->entry, true); + if (rc < 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to configure flow filter"); + rte_free(flow); + return NULL; + } + + return flow; +} + +static int +qede_flow_destroy(struct rte_eth_dev *eth_dev, + struct rte_flow *flow, + struct rte_flow_error *error) +{ + int rc = 0; + + rc = qede_config_arfs_filter(eth_dev, &flow->entry, false); + if (rc < 0) { + rte_flow_error_set(error, rc, + RTE_FLOW_ERROR_TYPE_HANDLE, NULL, + "Failed to delete flow filter"); + rte_free(flow); + } + + return rc; +} + +const struct rte_flow_ops qede_flow_ops = { + .validate = qede_flow_validate, + .create = qede_flow_create, + .destroy = qede_flow_destroy, +}; + int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, enum rte_filter_type filter_type, enum rte_filter_op filter_op, @@ -1195,6 +1528,17 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, return qede_fdir_filter_conf(eth_dev, filter_op, arg); case RTE_ETH_FILTER_NTUPLE: return qede_ntuple_filter_conf(eth_dev, filter_op, arg); + case RTE_ETH_FILTER_GENERIC: + if (ECORE_IS_CMT(edev)) { + DP_ERR(edev, "flowdir is not supported in 100G mode\n"); + return -ENOTSUP; + } + + if (filter_op != RTE_ETH_FILTER_GET) + return -EINVAL; + + *(const void **)arg = &qede_flow_ops; + return 0; case RTE_ETH_FILTER_MACVLAN: case RTE_ETH_FILTER_ETHERTYPE: case RTE_ETH_FILTER_FLEXIBLE: @@ -1210,5 +1554,3 @@ int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev, return 0; } - -/* RTE_FLOW */
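
Usage note (not part of the patch): the RTE_ETH_FILTER_GENERIC case added to qede_dev_filter_ctrl() returns &qede_flow_ops, so applications reach qede_flow_validate()/qede_flow_create() through the standard rte_flow calls. The sketch below shows one way that path might be exercised; the port id, addresses, ports and queue index are placeholder values, and the pattern is limited to what qede_flow_parse_pattern() accepts (IPv4/IPv6 plus UDP/TCP items carrying a spec, with no mask or last).

/* Illustrative only: steer IPv4/UDP 192.168.0.1:4000 -> 8.8.8.8:53 to Rx queue 1
 * via the generic flow API; on qede this ends up in qede_flow_create() above.
 */
#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

static struct rte_flow *
setup_4tuple_rule(uint16_t port_id, struct rte_flow_error *err)
{
	/* group, priority, egress and transfer must stay 0 for this driver */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(8, 8, 8, 8)),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(4000),
		.hdr.dst_port = rte_cpu_to_be_16(53),
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	/* spec is required and mask/last are rejected by qede_flow_parse_pattern() */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

Tearing the rule down goes through rte_flow_destroy(port_id, flow, &err), which maps to qede_flow_destroy() and removes the arfs entry configured by qede_config_arfs_filter().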