diff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c
index 3699edce49..241af6c4ca 100644
--- a/lib/librte_ethdev/rte_flow.c
+++ b/lib/librte_ethdev/rte_flow.c
@@ -19,7 +19,7 @@
 #include "rte_flow.h"

 /* Mbuf dynamic field name for metadata. */
-int rte_flow_dynf_metadata_offs = -1;
+int32_t rte_flow_dynf_metadata_offs = -1;

 /* Mbuf dynamic field flag bit number for metadata. */
 uint64_t rte_flow_dynf_metadata_mask;
@@ -72,6 +72,7 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
        MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
        MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
+       MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
        MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
        MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
        MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
@@ -95,6 +96,8 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] = {
        MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
        MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
        MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
+       MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
+       MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
 };

 /** Generate flow_action[] entry. */
@@ -173,6 +176,16 @@ static const struct rte_flow_desc_data rte_flow_desc_action[] = {
        MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
        MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
        MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
+       MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
+       MK_FLOW_ACTION(MODIFY_FIELD,
+                      sizeof(struct rte_flow_action_modify_field)),
+       /**
+        * Shared action represented as handle of type
+        * (struct rte_flow_shared_action *) stored in conf field (see
+        * struct rte_flow_action); no need for additional structure to
+        * store shared action handle.
+        */
+       MK_FLOW_ACTION(SHARED, 0),
 };

 int
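The comment above spells out the convention the new table entry relies on: a shared action is referenced through the generic conf pointer of struct rte_flow_action, so the descriptor size is 0 and rte_flow_conv() has no configuration structure to copy. A minimal caller-side sketch of that convention (error handling elided; everything except the rte_flow API names is illustrative):

    #include <rte_flow.h>

    /* Attach a previously created shared action to a new rule. The
     * handle returned by rte_flow_shared_action_create() is itself the
     * action configuration; no wrapper struct exists, which is why
     * MK_FLOW_ACTION(SHARED, 0) above declares a zero-sized conf. */
    static struct rte_flow *
    attach_shared_action(uint16_t port_id,
                         struct rte_flow_shared_action *handle,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         struct rte_flow_error *err)
    {
        const struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = handle },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, attr, pattern, actions, err);
    }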
@@ -206,6 +219,20 @@ error:
        return -rte_errno;
 }

+static inline void
+fts_enter(struct rte_eth_dev *dev)
+{
+       if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+               pthread_mutex_lock(&dev->data->flow_ops_mutex);
+}
+
+static inline void
+fts_exit(struct rte_eth_dev *dev)
+{
+       if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+               pthread_mutex_unlock(&dev->data->flow_ops_mutex);
+}
+
 static int
 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
 {
@@ -218,99 +245,6 @@ flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
        return ret;
 }

-static enum rte_flow_item_type
-rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
-{
-       enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-       uint16_t ether_type = 0;
-       uint16_t ether_type_m;
-       uint8_t ip_next_proto = 0;
-       uint8_t ip_next_proto_m;
-
-       if (item == NULL || item->spec == NULL)
-               return ret;
-       switch (item->type) {
-       case RTE_FLOW_ITEM_TYPE_ETH:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_eth *)
-                                               (item->mask))->type;
-               else
-                       ether_type_m = rte_flow_item_eth_mask.type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_eth *)
-                               (item->spec))->type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               break;
-       case RTE_FLOW_ITEM_TYPE_VLAN:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_vlan *)
-                                               (item->mask))->inner_type;
-               else
-                       ether_type_m = rte_flow_item_vlan_mask.inner_type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_vlan *)
-                               (item->spec))->inner_type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               break;
-       case RTE_FLOW_ITEM_TYPE_IPV4:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
-                                       (item->mask))->hdr.next_proto_id;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv4_mask.hdr.next_proto_id;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv4 *)
-                               (item->spec))->hdr.next_proto_id;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               break;
-       case RTE_FLOW_ITEM_TYPE_IPV6:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
-                                               (item->mask))->hdr.proto;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv6_mask.hdr.proto;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv6 *)
-                               (item->spec))->hdr.proto;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               break;
-       default:
-               ret = RTE_FLOW_ITEM_TYPE_VOID;
-               break;
-       }
-       return ret;
-}
-
 /* Get generic flow operations structure from a port. */
 const struct rte_flow_ops *
 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
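The fts_enter()/fts_exit() pair added above gives every rte_flow entry point a per-port critical section keyed off RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE: a driver that serializes its flow engine internally can set the flag and the mutex is skipped entirely. A hedged sketch of the opt-out from a driver's init path (the function name is hypothetical):

    /* Hypothetical PMD init hook: declaring the flow ops thread-safe
     * makes fts_enter()/fts_exit() bypass flow_ops_mutex, so the driver
     * must provide its own synchronization for validate/create/destroy/
     * flush/query/isolate. */
    static int
    example_pmd_dev_init(struct rte_eth_dev *eth_dev)
    {
        eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
        return 0;
    }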
@@ -345,12 +279,16 @@ rte_flow_validate(uint16_t port_id,
 {
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       int ret;

        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->validate))
-               return flow_err(port_id, ops->validate(dev, attr, pattern,
-                                                      actions, error), error);
+       if (likely(!!ops->validate)) {
+               fts_enter(dev);
+               ret = ops->validate(dev, attr, pattern, actions, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -371,7 +309,9 @@ rte_flow_create(uint16_t port_id,
        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
+               fts_enter(dev);
                flow = ops->create(dev, attr, pattern, actions, error);
+               fts_exit(dev);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
@@ -389,12 +329,16 @@ rte_flow_destroy(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;

        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->destroy))
-               return flow_err(port_id, ops->destroy(dev, flow, error),
-                               error);
+       if (likely(!!ops->destroy)) {
+               fts_enter(dev);
+               ret = ops->destroy(dev, flow, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -407,11 +351,16 @@ rte_flow_flush(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;

        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->flush))
-               return flow_err(port_id, ops->flush(dev, error), error);
+       if (likely(!!ops->flush)) {
+               fts_enter(dev);
+               ret = ops->flush(dev, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -427,12 +376,16 @@ rte_flow_query(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;

        if (!ops)
                return -rte_errno;
-       if (likely(!!ops->query))
-               return flow_err(port_id, ops->query(dev, flow, action, data,
-                                                   error), error);
+       if (likely(!!ops->query)) {
+               fts_enter(dev);
+               ret = ops->query(dev, flow, action, data, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -446,11 +399,16 @@ rte_flow_isolate(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;

        if (!ops)
                return -rte_errno;
-       if (likely(!!ops->isolate))
-               return flow_err(port_id, ops->isolate(dev, set, error), error);
+       if (likely(!!ops->isolate)) {
+               fts_enter(dev);
+               ret = ops->isolate(dev, set, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
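Note that each wrapper above takes and releases the mutex on its own, so back-to-back calls are individually serialized but not atomic as a pair: a pattern validated on one thread can still fail creation if another thread changes device state in between. A minimal caller sketch (names other than the rte_flow API are illustrative):

    #include <rte_flow.h>

    /* Validate, then create. Each call locks flow_ops_mutex separately
     * (unless the PMD is flagged thread-safe), so the pair is not one
     * critical section. */
    static struct rte_flow *
    checked_create(uint16_t port_id, const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *err)
    {
        if (rte_flow_validate(port_id, attr, pattern, actions, err) != 0)
            return NULL; /* err was already filled through flow_err() */
        return rte_flow_create(port_id, attr, pattern, actions, err);
    }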
@@ -563,7 +521,11 @@ rte_flow_conv_item_spec(void *buf, const size_t size,
                }
                break;
        default:
-               off = rte_flow_desc_item[item->type].size;
+               /**
+                * allow PMD private flow item
+                */
+               off = (int)item->type >= 0 ?
+                     rte_flow_desc_item[item->type].size : sizeof(void *);
                rte_memcpy(buf, data, (size > off ? off : size));
                break;
        }
@@ -618,7 +580,7 @@ rte_flow_conv_action_conf(void *buf, const size_t size,
                        }),
                        size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
                off = sizeof(*dst.rss);
-               if (src.rss->key_len) {
+               if (src.rss->key_len && src.rss->key) {
                        off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
                        tmp = sizeof(*src.rss->key) * src.rss->key_len;
                        if (size >= off + tmp)
@@ -666,7 +628,11 @@ rte_flow_conv_action_conf(void *buf, const size_t size,
                }
                break;
        default:
-               off = rte_flow_desc_action[action->type].size;
+               /**
+                * allow PMD private flow action
+                */
+               off = (int)action->type >= 0 ?
+                     rte_flow_desc_action[action->type].size : sizeof(void *);
                rte_memcpy(buf, action->conf, (size > off ? off : size));
                break;
        }
@@ -708,8 +674,12 @@ rte_flow_conv_pattern(struct rte_flow_item *dst,
        unsigned int i;

        for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
-               if ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
-                   !rte_flow_desc_item[src->type].name)
+               /**
+                * allow PMD private flow item
+                */
+               if (((int)src->type >= 0) &&
+                   ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
+                    !rte_flow_desc_item[src->type].name))
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 src, "cannot convert unknown item type");
@@ -797,8 +767,12 @@ rte_flow_conv_actions(struct rte_flow_action *dst,
        unsigned int i;

        for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
-               if ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
-                   !rte_flow_desc_action[src->type].name)
+               /**
+                * allow PMD private flow action
+                */
+               if (((int)src->type >= 0) &&
+                   ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
+                    !rte_flow_desc_action[src->type].name))
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                 src, "cannot convert unknown action type");
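The (int)type >= 0 checks above encode the convention that negative type values are PMD-private: they are never looked up in rte_flow_desc_item[]/rte_flow_desc_action[], and their spec/conf is copied as a plain pointer (sizeof(void *)). A hedged illustration of the driver-side shape of such a type (the enum and its value are made up for the example):

    #include <limits.h>
    #include <rte_flow.h>

    /* Hypothetical PMD-private item type. Negative values cannot clash
     * with enum rte_flow_item_type, and rte_flow_conv() treats the spec
     * as an opaque pointer instead of indexing rte_flow_desc_item[]. */
    enum example_pmd_item_type {
        EXAMPLE_PMD_ITEM_TYPE_TAG = INT_MIN,
    };

    static const struct rte_flow_item example_private_item = {
        .type = (enum rte_flow_item_type)EXAMPLE_PMD_ITEM_TYPE_TAG,
        .spec = NULL, /* driver-defined opaque descriptor would go here */
    };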
@@ -1049,203 +1023,238 @@ rte_flow_copy(struct rte_flow_desc *desc, size_t len,
        return ret;
 }

-/**
- * Expand RSS flows into several possible flows according to the RSS hash
- * fields requested and the driver capabilities.
- */
 int
-rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
-                   const struct rte_flow_item *pattern, uint64_t types,
-                   const struct rte_flow_expand_node graph[],
-                   int graph_root_index)
+rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
 {
-       const int elt_n = 8;
-       const struct rte_flow_item *item;
-       const struct rte_flow_expand_node *node = &graph[graph_root_index];
-       const int *next_node;
-       const int *stack[elt_n];
-       int stack_pos = 0;
-       struct rte_flow_item flow_items[elt_n];
-       unsigned int i;
-       size_t lsize;
-       size_t user_pattern_size = 0;
-       void *addr = NULL;
-       const struct rte_flow_expand_node *next = NULL;
-       struct rte_flow_item missed_item;
-       int missed = 0;
-       int elt = 0;
-       const struct rte_flow_item *last_item = NULL;
-
-       memset(&missed_item, 0, sizeof(missed_item));
-       lsize = offsetof(struct rte_flow_expand_rss, entry) +
-               elt_n * sizeof(buf->entry[0]);
-       if (lsize <= size) {
-               buf->entry[0].priority = 0;
-               buf->entry[0].pattern = (void *)&buf->entry[elt_n];
-               buf->entries = 0;
-               addr = buf->entry[0].pattern;
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (likely(!!ops->dev_dump)) {
+               fts_enter(dev);
+               ret = ops->dev_dump(dev, file, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
        }
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
-                       last_item = item;
-               for (i = 0; node->next && node->next[i]; ++i) {
-                       next = &graph[node->next[i]];
-                       if (next->type == item->type)
-                               break;
-               }
-               if (next)
-                       node = next;
-               user_pattern_size += sizeof(*item);
+       return rte_flow_error_set(error, ENOSYS,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL, rte_strerror(ENOSYS));
+}
+
+int
+rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
+                       uint32_t nb_contexts, struct rte_flow_error *error)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (likely(!!ops->get_aged_flows)) {
+               fts_enter(dev);
+               ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
        }
-       user_pattern_size += sizeof(*item); /* Handle END item. */
-       lsize += user_pattern_size;
-       /* Copy the user pattern in the first entry of the buffer. */
-       if (lsize <= size) {
-               rte_memcpy(addr, pattern, user_pattern_size);
-               addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-               buf->entries = 1;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL, rte_strerror(ENOTSUP));
+}
+
+struct rte_flow_shared_action *
+rte_flow_shared_action_create(uint16_t port_id,
+                             const struct rte_flow_shared_action_conf *conf,
+                             const struct rte_flow_action *action,
+                             struct rte_flow_error *error)
+{
+       struct rte_flow_shared_action *shared_action;
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return NULL;
+       if (unlikely(!ops->shared_action_create)) {
+               rte_flow_error_set(error, ENOSYS,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  rte_strerror(ENOSYS));
+               return NULL;
        }
-       /* Start expanding. */
-       memset(flow_items, 0, sizeof(flow_items));
-       user_pattern_size -= sizeof(*item);
-       /*
-        * Check if the last valid item has spec set
-        * and need complete pattern.
-        */
-       missed_item.type = rte_flow_expand_rss_item_complete(last_item);
-       if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
-               next = NULL;
-               missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
-                       next = &graph[node->next[i]];
-                       if (next->type == missed_item.type) {
-                               flow_items[0].type = missed_item.type;
-                               flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
-                               break;
-                       }
-                       next = NULL;
-               }
+       shared_action = ops->shared_action_create(&rte_eth_devices[port_id],
+                                                 conf, action, error);
+       if (shared_action == NULL)
+               flow_err(port_id, -rte_errno, error);
+       return shared_action;
+}
+
+int
+rte_flow_shared_action_destroy(uint16_t port_id,
+                              struct rte_flow_shared_action *action,
+                              struct rte_flow_error *error)
+{
+       int ret;
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (unlikely(!ops->shared_action_destroy))
+               return rte_flow_error_set(error, ENOSYS,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, rte_strerror(ENOSYS));
+       ret = ops->shared_action_destroy(&rte_eth_devices[port_id], action,
+                                        error);
+       return flow_err(port_id, ret, error);
+}
+
+int
+rte_flow_shared_action_update(uint16_t port_id,
+                             struct rte_flow_shared_action *action,
+                             const struct rte_flow_action *update,
+                             struct rte_flow_error *error)
+{
+       int ret;
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (unlikely(!ops->shared_action_update))
+               return rte_flow_error_set(error, ENOSYS,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, rte_strerror(ENOSYS));
+       ret = ops->shared_action_update(&rte_eth_devices[port_id], action,
+                                       update, error);
+       return flow_err(port_id, ret, error);
+}
+
+int
+rte_flow_shared_action_query(uint16_t port_id,
+                            const struct rte_flow_shared_action *action,
+                            void *data,
+                            struct rte_flow_error *error)
+{
+       int ret;
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (unlikely(!ops->shared_action_query))
+               return rte_flow_error_set(error, ENOSYS,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, rte_strerror(ENOSYS));
+       ret = ops->shared_action_query(&rte_eth_devices[port_id], action,
+                                      data, error);
+       return flow_err(port_id, ret, error);
+}
+
+int
+rte_flow_tunnel_decap_set(uint16_t port_id,
+                         struct rte_flow_tunnel *tunnel,
+                         struct rte_flow_action **actions,
+                         uint32_t *num_of_actions,
+                         struct rte_flow_error *error)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (likely(!!ops->tunnel_decap_set)) {
+               return flow_err(port_id,
+                               ops->tunnel_decap_set(dev, tunnel, actions,
+                                                     num_of_actions, error),
+                               error);
        }
-       if (next && missed) {
-               elt = 2; /* missed item + item end. */
-               node = next;
-               lsize += elt * sizeof(*item) + user_pattern_size;
-               if ((node->rss_types & types) && lsize <= size) {
-                       buf->entry[buf->entries].priority = 1;
-                       buf->entry[buf->entries].pattern = addr;
-                       buf->entries++;
-                       rte_memcpy(addr, buf->entry[0].pattern,
-                                  user_pattern_size);
-                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
-                       addr = (void *)(((uintptr_t)addr) +
-                                       elt * sizeof(*item));
-               }
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL, rte_strerror(ENOTSUP));
+}
+
+int
+rte_flow_tunnel_match(uint16_t port_id,
+                     struct rte_flow_tunnel *tunnel,
+                     struct rte_flow_item **items,
+                     uint32_t *num_of_items,
+                     struct rte_flow_error *error)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (likely(!!ops->tunnel_match)) {
+               return flow_err(port_id,
+                               ops->tunnel_match(dev, tunnel, items,
+                                                 num_of_items, error),
+                               error);
        }
-       memset(flow_items, 0, sizeof(flow_items));
-       next_node = node->next;
-       stack[stack_pos] = next_node;
-       node = next_node ? &graph[*next_node] : NULL;
-       while (node) {
-               flow_items[stack_pos].type = node->type;
-               if (node->rss_types & types) {
-                       /*
-                        * compute the number of items to copy from the
-                        * expansion and copy it.
-                        * When the stack_pos is 0, there are 1 element in it,
-                        * plus the addition END item.
-                        */
-                       elt = stack_pos + 2;
-                       flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
-                       lsize += elt * sizeof(*item) + user_pattern_size;
-                       if (lsize <= size) {
-                               size_t n = elt * sizeof(*item);
-
-                               buf->entry[buf->entries].priority =
-                                       stack_pos + 1 + missed;
-                               buf->entry[buf->entries].pattern = addr;
-                               buf->entries++;
-                               rte_memcpy(addr, buf->entry[0].pattern,
-                                          user_pattern_size);
-                               addr = (void *)(((uintptr_t)addr) +
-                                               user_pattern_size);
-                               rte_memcpy(addr, &missed_item,
-                                          missed * sizeof(*item));
-                               addr = (void *)(((uintptr_t)addr) +
-                                               missed * sizeof(*item));
-                               rte_memcpy(addr, flow_items, n);
-                               addr = (void *)(((uintptr_t)addr) + n);
-                       }
-               }
-               /* Go deeper. */
-               if (node->next) {
-                       next_node = node->next;
-                       if (stack_pos++ == elt_n) {
-                               rte_errno = E2BIG;
-                               return -rte_errno;
-                       }
-                       stack[stack_pos] = next_node;
-               } else if (*(next_node + 1)) {
-                       /* Follow up with the next possibility. */
-                       ++next_node;
-               } else {
-                       /* Move to the next path. */
-                       if (stack_pos)
-                               next_node = stack[--stack_pos];
-                       next_node++;
-                       stack[stack_pos] = next_node;
-               }
-               node = *next_node ? &graph[*next_node] : NULL;
-       };
-       /* no expanded flows but we have missed item, create one rule for it */
-       if (buf->entries == 1 && missed != 0) {
-               elt = 2;
-               lsize += elt * sizeof(*item) + user_pattern_size;
-               if (lsize <= size) {
-                       buf->entry[buf->entries].priority = 1;
-                       buf->entry[buf->entries].pattern = addr;
-                       buf->entries++;
-                       flow_items[0].type = missed_item.type;
-                       flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
-                       rte_memcpy(addr, buf->entry[0].pattern,
-                                  user_pattern_size);
-                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
-                       addr = (void *)(((uintptr_t)addr) +
-                                       elt * sizeof(*item));
-               }
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL, rte_strerror(ENOTSUP));
+}
+
+int
+rte_flow_get_restore_info(uint16_t port_id,
+                         struct rte_mbuf *m,
+                         struct rte_flow_restore_info *restore_info,
+                         struct rte_flow_error *error)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+       if (unlikely(!ops))
+               return -rte_errno;
+       if (likely(!!ops->get_restore_info)) {
+               return flow_err(port_id,
+                               ops->get_restore_info(dev, m, restore_info,
+                                                     error),
+                               error);
        }
-       return lsize;
+       return rte_flow_error_set(error, ENOTSUP,
+                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                 NULL, rte_strerror(ENOTSUP));
 }

 int
-rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
+rte_flow_tunnel_action_decap_release(uint16_t port_id,
+                                    struct rte_flow_action *actions,
+                                    uint32_t num_of_actions,
+                                    struct rte_flow_error *error)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->dev_dump))
-               return flow_err(port_id, ops->dev_dump(dev, file, error),
+       if (likely(!!ops->tunnel_action_decap_release)) {
+               return flow_err(port_id,
+                               ops->tunnel_action_decap_release(dev, actions,
+                                                                num_of_actions,
+                                                                error),
                                error);
-       return rte_flow_error_set(error, ENOSYS,
+       }
+       return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                                 NULL, rte_strerror(ENOSYS));
+                                 NULL, rte_strerror(ENOTSUP));
 }

 int
-rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
-                       uint32_t nb_contexts, struct rte_flow_error *error)
+rte_flow_tunnel_item_release(uint16_t port_id,
+                            struct rte_flow_item *items,
+                            uint32_t num_of_items,
+                            struct rte_flow_error *error)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->get_aged_flows))
-               return flow_err(port_id, ops->get_aged_flows(dev, contexts,
-                               nb_contexts, error), error);
+       if (likely(!!ops->tunnel_item_release)) {
+               return flow_err(port_id,
+                               ops->tunnel_item_release(dev, items,
+                                                        num_of_items, error),
+                               error);
+       }
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOTSUP));
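The tunnel offload helpers added at the end follow an acquire/release pattern: the PMD hands out the actions (or items) that implement the offload, the application embeds them in its rules, and returns them through the matching release call. A condensed sketch of the decap-set round trip (error handling trimmed; only the rte_flow calls are from the API above):

    #include <rte_flow.h>

    /* Ask the PMD for the actions implementing decap for this tunnel,
     * use them while building rules, then hand them back. */
    static int
    tunnel_decap_roundtrip(uint16_t port_id, struct rte_flow_tunnel *tunnel,
                           struct rte_flow_error *err)
    {
        struct rte_flow_action *actions;
        uint32_t nb_actions;
        int ret;

        ret = rte_flow_tunnel_decap_set(port_id, tunnel, &actions,
                                        &nb_actions, err);
        if (ret != 0)
            return ret;

        /* ... append the PMD-provided actions to a rule's action list,
         * before RTE_FLOW_ACTION_TYPE_END, and create the rule ... */

        return rte_flow_tunnel_action_decap_release(port_id, actions,
                                                    nb_actions, err);
    }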