diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 38e6b7301e..f2050f65e9 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1,4 +1,6 @@
 /*-
+ * BSD LICENSE
+ *
  * Copyright (c) 2017 Solarflare Communications Inc.
  * All rights reserved.
  *
@@ -76,6 +78,7 @@ static sfc_flow_item_parse sfc_flow_parse_vlan;
 static sfc_flow_item_parse sfc_flow_parse_ipv4;
 static sfc_flow_item_parse sfc_flow_parse_ipv6;
 static sfc_flow_item_parse sfc_flow_parse_tcp;
+static sfc_flow_item_parse sfc_flow_parse_udp;
 
 static boolean_t
 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
@@ -204,8 +207,10 @@ sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
  *
  * @param item[in]
  *   Item specification. Only source and destination addresses and
- *   Ethernet type fields are supported. If the mask is NULL, default
- *   mask will be used. Ranging is not supported.
+ *   Ethernet type fields are supported. In addition to full and
+ *   empty masks of the destination address, an individual/group
+ *   mask is also supported. If the mask is NULL, the default mask
+ *   will be used. Ranging is not supported.
  * @param efx_spec[in, out]
  *   EFX filter specification to update.
  * @param[out] error
@@ -224,6 +229,9 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
 		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 		.type = 0xffff,
 	};
+	const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
+		0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
 
 	rc = sfc_flow_parse_init(item,
 				 (const void **)&spec,
@@ -243,6 +251,14 @@ sfc_flow_parse_eth(const struct rte_flow_item *item,
 		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
 		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
 			   EFX_MAC_ADDR_LEN);
+	} else if (memcmp(mask->dst.addr_bytes, ig_mask,
+			  EFX_MAC_ADDR_LEN) == 0) {
+		if (is_unicast_ether_addr(&spec->dst))
+			efx_spec->efs_match_flags |=
+				EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+		else
+			efx_spec->efs_match_flags |=
+				EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
 	} else if (!is_zero_ether_addr(&mask->dst)) {
 		goto fail_bad_mask;
 	}
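
Editor's illustration (not part of the patch): with the individual/group mask
support added above, an application can match all unknown-unicast traffic by
masking only the I/G bit of the destination MAC. A spec with the I/G bit clear
selects EFX_FILTER_MATCH_UNKNOWN_UCAST_DST; with the bit set,
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST. A minimal sketch against the DPDK
17.08-era rte_flow API (identifiers other than the API's own are made up):

#include <rte_flow.h>

static const struct rte_flow_item_eth ig_eth_spec = {
	/* I/G bit (lowest bit of the first octet) clear: unicast */
	.dst.addr_bytes = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

static const struct rte_flow_item_eth ig_eth_mask = {
	/* Match on the I/G bit only, exactly the ig_mask checked above */
	.dst.addr_bytes = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

static const struct rte_flow_item ig_pattern[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &ig_eth_spec,
		.mask = &ig_eth_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

Such a pattern would be combined with an ingress attribute and a queue action
in the usual rte_flow_create() call.
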
@@ -621,6 +637,87 @@ fail_bad_mask:
 	return -rte_errno;
 }
 
+/**
+ * Convert UDP item to EFX filter specification.
+ *
+ * @param item[in]
+ *   Item specification. Only the source and destination port fields
+ *   are supported. If the mask is NULL, the default mask will be
+ *   used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ *   EFX filter specification to update.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_udp(const struct rte_flow_item *item,
+		   efx_filter_spec_t *efx_spec,
+		   struct rte_flow_error *error)
+{
+	int rc;
+	const struct rte_flow_item_udp *spec = NULL;
+	const struct rte_flow_item_udp *mask = NULL;
+	const struct rte_flow_item_udp supp_mask = {
+		.hdr = {
+			.src_port = 0xffff,
+			.dst_port = 0xffff,
+		}
+	};
+
+	rc = sfc_flow_parse_init(item,
+				 (const void **)&spec,
+				 (const void **)&mask,
+				 &supp_mask,
+				 &rte_flow_item_udp_mask,
+				 sizeof(struct rte_flow_item_udp),
+				 error);
+	if (rc != 0)
+		return rc;
+
+	/*
+	 * Filtering by UDP source and destination ports requires
+	 * the appropriate IP_PROTO in hardware filters
+	 */
+	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+		efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
+	} else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM, item,
+			"IP proto in pattern with UDP item should be appropriate");
+		return -rte_errno;
+	}
+
+	if (spec == NULL)
+		return 0;
+
+	/*
+	 * Source and destination ports are in big-endian byte order
+	 * in the item and in little-endian in efx_spec, so a byte
+	 * swap is used
+	 */
+	if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+		efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+	} else if (mask->hdr.src_port != 0) {
+		goto fail_bad_mask;
+	}
+
+	if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+		efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+	} else if (mask->hdr.dst_port != 0) {
+		goto fail_bad_mask;
+	}
+
+	return 0;
+
+fail_bad_mask:
+	rte_flow_error_set(error, EINVAL,
+		RTE_FLOW_ERROR_TYPE_ITEM, item,
+		"Bad mask in the UDP pattern item");
+	return -rte_errno;
+}
+
 static const struct sfc_flow_item sfc_flow_items[] = {
 	{
 		.type = RTE_FLOW_ITEM_TYPE_VOID,
@@ -658,6 +755,12 @@ static const struct sfc_flow_item sfc_flow_items[] = {
 		.layer = SFC_FLOW_ITEM_L4,
 		.parse = sfc_flow_parse_tcp,
 	},
+	{
+		.type = RTE_FLOW_ITEM_TYPE_UDP,
+		.prev_layer = SFC_FLOW_ITEM_L3,
+		.layer = SFC_FLOW_ITEM_L4,
+		.parse = sfc_flow_parse_udp,
+	},
 };
 
 /*
@@ -700,7 +803,7 @@ sfc_flow_parse_attr(const struct rte_flow_attr *attr,
 	}
 
 	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
-	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+	flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
 
 	return 0;
 }
@@ -734,8 +837,7 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
 		return -rte_errno;
 	}
 
-	for (; pattern != NULL &&
-	       pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
 		item = sfc_flow_get_item(pattern->type);
 		if (item == NULL) {
 			rte_flow_error_set(error, ENOTSUP,
@@ -765,13 +867,6 @@ sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
 		prev_layer = item->layer;
 	}
 
-	if (pattern == NULL) {
-		rte_flow_error_set(error, EINVAL,
-				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-				   "NULL item");
-		return -rte_errno;
-	}
-
 	return 0;
 }
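
Editor's illustration (not part of the patch): a rule that exercises the new
UDP item, steering UDP traffic with destination port 4789 to Rx queue 1. Port
numbers in the item are big-endian, as the byte-swap comment in
sfc_flow_parse_udp() above explains, and the explicit mask covers only the
port fields that the PMD supports. A sketch against the DPDK 17.08-era
rte_flow API; the function name and the port/queue numbers are made up:

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_flow.h>

static int
add_udp_dport_rule(uint8_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_udp udp_spec = {
		.hdr.dst_port = rte_cpu_to_be_16(4789),
	};
	const struct rte_flow_item_udp udp_mask = {
		/* Full destination port mask; src_port left unmasked */
		.hdr.dst_port = 0xffff,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{
			.type = RTE_FLOW_ITEM_TYPE_UDP,
			.spec = &udp_spec,
			.mask = &udp_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* NULL means the PMD rejected the rule and set rte_errno */
	return rte_flow_create(port_id, &attr, pattern, actions,
			       &error) != NULL ? 0 : -rte_errno;
}
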
@@ -791,6 +886,170 @@ sfc_flow_parse_queue(struct sfc_adapter *sa,
 	return 0;
 }
 
+#if EFSYS_OPT_RX_SCALE
+static int
+sfc_flow_parse_rss(struct sfc_adapter *sa,
+		   const struct rte_flow_action_rss *rss,
+		   struct rte_flow *flow)
+{
+	unsigned int rxq_sw_index;
+	struct sfc_rxq *rxq;
+	unsigned int rxq_hw_index_min;
+	unsigned int rxq_hw_index_max;
+	const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
+	uint64_t rss_hf;
+	uint8_t *rss_key = NULL;
+	struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
+	unsigned int i;
+
+	if (rss->num == 0)
+		return -EINVAL;
+
+	rxq_sw_index = sa->rxq_count - 1;
+	rxq = sa->rxq_info[rxq_sw_index].rxq;
+	rxq_hw_index_min = rxq->hw_index;
+	rxq_hw_index_max = 0;
+
+	for (i = 0; i < rss->num; ++i) {
+		rxq_sw_index = rss->queue[i];
+
+		if (rxq_sw_index >= sa->rxq_count)
+			return -EINVAL;
+
+		rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+		if (rxq->hw_index < rxq_hw_index_min)
+			rxq_hw_index_min = rxq->hw_index;
+
+		if (rxq->hw_index > rxq_hw_index_max)
+			rxq_hw_index_max = rxq->hw_index;
+	}
+
+	rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
+	if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+		return -EINVAL;
+
+	if (rss_conf != NULL) {
+		if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+			return -EINVAL;
+
+		rss_key = rss_conf->rss_key;
+	} else {
+		rss_key = sa->rss_key;
+	}
+
+	flow->rss = B_TRUE;
+
+	sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
+	sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
+	sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
+	rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
+
+	for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
+		unsigned int rxq_sw_index = rss->queue[i % rss->num];
+		struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+		sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
+	}
+
+	return 0;
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static int
+sfc_flow_filter_insert(struct sfc_adapter *sa,
+		       struct rte_flow *flow)
+{
+	efx_filter_spec_t *spec = &flow->spec;
+
+#if EFSYS_OPT_RX_SCALE
+	struct sfc_flow_rss *rss = &flow->rss_conf;
+	int rc = 0;
+
+	if (flow->rss) {
+		unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
+					      rss->rxq_hw_index_min + 1,
+					      EFX_MAXRSS);
+
+		rc = efx_rx_scale_context_alloc(sa->nic,
+						EFX_RX_SCALE_EXCLUSIVE,
+						rss_spread,
+						&spec->efs_rss_context);
+		if (rc != 0)
+			goto fail_scale_context_alloc;
+
+		rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
+					   EFX_RX_HASHALG_TOEPLITZ,
+					   rss->rss_hash_types, B_TRUE);
+		if (rc != 0)
+			goto fail_scale_mode_set;
+
+		rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
+					  rss->rss_key,
+					  sizeof(sa->rss_key));
+		if (rc != 0)
+			goto fail_scale_key_set;
+
+		spec->efs_dmaq_id = rss->rxq_hw_index_min;
+		spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+	}
+
+	rc = efx_filter_insert(sa->nic, spec);
+	if (rc != 0)
+		goto fail_filter_insert;
+
+	if (flow->rss) {
+		/*
+		 * Scale table is set after filter insertion because
+		 * the table entries are relative to the base RxQ ID
+		 * and the latter is submitted to the HW by means of
+		 * inserting a filter, so by the time of the request
+		 * the HW knows all the information needed to verify
+		 * the table entries, and the operation will succeed
+		 */
+		rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
+					  rss->rss_tbl, RTE_DIM(rss->rss_tbl));
+		if (rc != 0)
+			goto fail_scale_tbl_set;
+	}
+
+	return 0;
+
+fail_scale_tbl_set:
+	efx_filter_remove(sa->nic, spec);
+
+fail_filter_insert:
+fail_scale_key_set:
+fail_scale_mode_set:
+	if (flow->rss)
+		efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+
+fail_scale_context_alloc:
+	return rc;
+#else /* !EFSYS_OPT_RX_SCALE */
+	return efx_filter_insert(sa->nic, spec);
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+static int
+sfc_flow_filter_remove(struct sfc_adapter *sa,
+		       struct rte_flow *flow)
+{
+	efx_filter_spec_t *spec = &flow->spec;
+	int rc = 0;
+
+	rc = efx_filter_remove(sa->nic, spec);
+	if (rc != 0)
+		return rc;
+
+#if EFSYS_OPT_RX_SCALE
+	if (flow->rss)
+		rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+#endif /* EFSYS_OPT_RX_SCALE */
+
+	return rc;
+}
+
 static int
 sfc_flow_parse_actions(struct sfc_adapter *sa,
 		       const struct rte_flow_action actions[],
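
Editor's illustration (not part of the patch): building the RSS action that
sfc_flow_parse_rss() above consumes. In the 17.08-era API, struct
rte_flow_action_rss ends in a flexible queue[] array, so a wrapper struct is
one common way to initialize it statically; passing a NULL rss_conf makes the
PMD fall back to SFC_RSS_OFFLOADS and the adapter RSS key, per the parse
function above. A sketch; the wrapper name and queue numbers are made up:

#include <rte_flow.h>

static struct {
	struct rte_flow_action_rss conf;
	uint16_t queue[4];		/* storage for conf.queue[] */
} rss_spread = {
	.conf = {
		.rss_conf = NULL,	/* NULL: default hash types and key */
		.num = 4,		/* number of entries in queue[] */
	},
	.queue = { 0, 1, 2, 3 },	/* Rx queues to spread across */
};

static const struct rte_flow_action rss_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_spread.conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

With queues whose hardware indices are, say, 5..8, the parse function records
min 5 and max 8, the insert path allocates a scale context with spread 4, and
each rss_tbl entry is stored relative to the base (hw_index - 5).
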
@@ -824,6 +1083,20 @@ sfc_flow_parse_actions(struct sfc_adapter *sa,
 		is_specified = B_TRUE;
 		break;
 
+#if EFSYS_OPT_RX_SCALE
+	case RTE_FLOW_ACTION_TYPE_RSS:
+		rc = sfc_flow_parse_rss(sa, actions->conf, flow);
+		if (rc != 0) {
+			rte_flow_error_set(error, rc,
+				RTE_FLOW_ERROR_TYPE_ACTION, actions,
+				"Bad RSS action");
+			return -rte_errno;
+		}
+
+		is_specified = B_TRUE;
+		break;
+#endif /* EFSYS_OPT_RX_SCALE */
+
 	default:
 		rte_flow_error_set(error, ENOTSUP,
 			RTE_FLOW_ERROR_TYPE_ACTION, actions,
@@ -918,7 +1191,7 @@ sfc_flow_create(struct rte_eth_dev *dev,
 	sfc_adapter_lock(sa);
 
 	if (sa->state == SFC_ADAPTER_STARTED) {
-		rc = efx_filter_insert(sa->nic, &flow->spec);
+		rc = sfc_flow_filter_insert(sa, flow);
 		if (rc != 0) {
 			rte_flow_error_set(error, rc,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -952,7 +1225,7 @@ sfc_flow_remove(struct sfc_adapter *sa,
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	if (sa->state == SFC_ADAPTER_STARTED) {
-		rc = efx_filter_remove(sa->nic, &flow->spec);
+		rc = sfc_flow_filter_remove(sa, flow);
 		if (rc != 0)
 			rte_flow_error_set(error, rc,
 				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
@@ -1017,12 +1290,35 @@ sfc_flow_flush(struct rte_eth_dev *dev,
 	return -ret;
 }
 
+static int
+sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
+		 struct rte_flow_error *error)
+{
+	struct sfc_adapter *sa = dev->data->dev_private;
+	struct sfc_port *port = &sa->port;
+	int ret = 0;
+
+	sfc_adapter_lock(sa);
+	if (sa->state != SFC_ADAPTER_INITIALIZED) {
+		rte_flow_error_set(error, EBUSY,
+				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   NULL, "please close the port first");
+		ret = -rte_errno;
+	} else {
+		port->isolated = (enable) ? B_TRUE : B_FALSE;
+	}
+	sfc_adapter_unlock(sa);
+
+	return ret;
+}
+
 const struct rte_flow_ops sfc_flow_ops = {
 	.validate = sfc_flow_validate,
 	.create = sfc_flow_create,
 	.destroy = sfc_flow_destroy,
 	.flush = sfc_flow_flush,
 	.query = NULL,
+	.isolate = sfc_flow_isolate,
 };
 
 void
@@ -1054,7 +1350,7 @@ sfc_flow_stop(struct sfc_adapter *sa)
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
-		efx_filter_remove(sa->nic, &flow->spec);
+		sfc_flow_filter_remove(sa, flow);
 }
 
 int
@@ -1068,7 +1364,7 @@ sfc_flow_start(struct sfc_adapter *sa)
 	SFC_ASSERT(sfc_adapter_is_locked(sa));
 
 	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
-		rc = efx_filter_insert(sa->nic, &flow->spec);
+		rc = sfc_flow_filter_insert(sa, flow);
 		if (rc != 0)
 			goto fail_bad_flow;
 	}
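
Editor's illustration (not part of the patch): enabling the isolated mode
wired up above. sfc_flow_isolate() only accepts the request while the adapter
is in the initialized (not started) state, so the call must precede device
start. A sketch against the 17.08-era API; the function name is made up:

#include <rte_ethdev.h>
#include <rte_flow.h>

static int
enter_isolated_mode(uint8_t port_id)
{
	struct rte_flow_error error;

	/* Must be done before rte_eth_dev_start() for this PMD */
	return rte_flow_isolate(port_id, 1, &error);
}
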