ethdev: make flow API thread safe
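
The rte_flow API entry points dispatch into per-driver callbacks, and nothing
at the ethdev level serialized those calls. Wrap every driver callback in
fts_enter()/fts_exit(), which take and release the per-port
dev->data->flow_ops_mutex unless the PMD sets the
RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE device flag to declare its own flow
operations already thread safe. The diff below also removes the RSS expansion
helpers rte_flow_expand_rss_item_complete() and rte_flow_expand_rss() from
this file.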
diff --git a/lib/librte_ethdev/rte_flow.c b/lib/librte_ethdev/rte_flow.c
index f8fdd68..4101b27 100644
--- a/lib/librte_ethdev/rte_flow.c
+++ b/lib/librte_ethdev/rte_flow.c
@@ -207,6 +207,20 @@ error:
        return -rte_errno;
 }
 
+static inline void
+fts_enter(struct rte_eth_dev *dev)
+{
+       if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+               pthread_mutex_lock(&dev->data->flow_ops_mutex);
+}
+
+static inline void
+fts_exit(struct rte_eth_dev *dev)
+{
+       if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
+               pthread_mutex_unlock(&dev->data->flow_ops_mutex);
+}
+
 static int
 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
 {
@@ -219,99 +233,6 @@ flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
        return ret;
 }
 
-static enum rte_flow_item_type
-rte_flow_expand_rss_item_complete(const struct rte_flow_item *item)
-{
-       enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
-       uint16_t ether_type = 0;
-       uint16_t ether_type_m;
-       uint8_t ip_next_proto = 0;
-       uint8_t ip_next_proto_m;
-
-       if (item == NULL || item->spec == NULL)
-               return ret;
-       switch (item->type) {
-       case RTE_FLOW_ITEM_TYPE_ETH:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_eth *)
-                                               (item->mask))->type;
-               else
-                       ether_type_m = rte_flow_item_eth_mask.type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_eth *)
-                               (item->spec))->type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               break;
-       case RTE_FLOW_ITEM_TYPE_VLAN:
-               if (item->mask)
-                       ether_type_m = ((const struct rte_flow_item_vlan *)
-                                               (item->mask))->inner_type;
-               else
-                       ether_type_m = rte_flow_item_vlan_mask.inner_type;
-               if (ether_type_m != RTE_BE16(0xFFFF))
-                       break;
-               ether_type = ((const struct rte_flow_item_vlan *)
-                               (item->spec))->inner_type;
-               if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
-                       ret = RTE_FLOW_ITEM_TYPE_VLAN;
-               break;
-       case RTE_FLOW_ITEM_TYPE_IPV4:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
-                                       (item->mask))->hdr.next_proto_id;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv4_mask.hdr.next_proto_id;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv4 *)
-                               (item->spec))->hdr.next_proto_id;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               break;
-       case RTE_FLOW_ITEM_TYPE_IPV6:
-               if (item->mask)
-                       ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
-                                               (item->mask))->hdr.proto;
-               else
-                       ip_next_proto_m =
-                               rte_flow_item_ipv6_mask.hdr.proto;
-               if (ip_next_proto_m != 0xFF)
-                       break;
-               ip_next_proto = ((const struct rte_flow_item_ipv6 *)
-                               (item->spec))->hdr.proto;
-               if (ip_next_proto == IPPROTO_UDP)
-                       ret = RTE_FLOW_ITEM_TYPE_UDP;
-               else if (ip_next_proto == IPPROTO_TCP)
-                       ret = RTE_FLOW_ITEM_TYPE_TCP;
-               else if (ip_next_proto == IPPROTO_IP)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV4;
-               else if (ip_next_proto == IPPROTO_IPV6)
-                       ret = RTE_FLOW_ITEM_TYPE_IPV6;
-               break;
-       default:
-               ret = RTE_FLOW_ITEM_TYPE_VOID;
-               break;
-       }
-       return ret;
-}
-
 /* Get generic flow operations structure from a port. */
 const struct rte_flow_ops *
 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
@@ -346,12 +267,16 @@ rte_flow_validate(uint16_t port_id,
 {
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       int ret;
 
        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->validate))
-               return flow_err(port_id, ops->validate(dev, attr, pattern,
-                                                      actions, error), error);
+       if (likely(!!ops->validate)) {
+               fts_enter(dev);
+               ret = ops->validate(dev, attr, pattern, actions, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -372,7 +297,9 @@ rte_flow_create(uint16_t port_id,
        if (unlikely(!ops))
                return NULL;
        if (likely(!!ops->create)) {
+               fts_enter(dev);
                flow = ops->create(dev, attr, pattern, actions, error);
+               fts_exit(dev);
                if (flow == NULL)
                        flow_err(port_id, -rte_errno, error);
                return flow;
@@ -390,12 +317,16 @@ rte_flow_destroy(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->destroy))
-               return flow_err(port_id, ops->destroy(dev, flow, error),
-                               error);
+       if (likely(!!ops->destroy)) {
+               fts_enter(dev);
+               ret = ops->destroy(dev, flow, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -408,11 +339,16 @@ rte_flow_flush(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->flush))
-               return flow_err(port_id, ops->flush(dev, error), error);
+       if (likely(!!ops->flush)) {
+               fts_enter(dev);
+               ret = ops->flush(dev, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -428,12 +364,16 @@ rte_flow_query(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (!ops)
                return -rte_errno;
-       if (likely(!!ops->query))
-               return flow_err(port_id, ops->query(dev, flow, action, data,
-                                                   error), error);
+       if (likely(!!ops->query)) {
+               fts_enter(dev);
+               ret = ops->query(dev, flow, action, data, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -447,11 +387,16 @@ rte_flow_isolate(uint16_t port_id,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (!ops)
                return -rte_errno;
-       if (likely(!!ops->isolate))
-               return flow_err(port_id, ops->isolate(dev, set, error), error);
+       if (likely(!!ops->isolate)) {
+               fts_enter(dev);
+               ret = ops->isolate(dev, set, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -1050,186 +995,21 @@ rte_flow_copy(struct rte_flow_desc *desc, size_t len,
        return ret;
 }
 
-/**
- * Expand RSS flows into several possible flows according to the RSS hash
- * fields requested and the driver capabilities.
- */
-int
-rte_flow_expand_rss(struct rte_flow_expand_rss *buf, size_t size,
-                   const struct rte_flow_item *pattern, uint64_t types,
-                   const struct rte_flow_expand_node graph[],
-                   int graph_root_index)
-{
-       const int elt_n = 8;
-       const struct rte_flow_item *item;
-       const struct rte_flow_expand_node *node = &graph[graph_root_index];
-       const int *next_node;
-       const int *stack[elt_n];
-       int stack_pos = 0;
-       struct rte_flow_item flow_items[elt_n];
-       unsigned int i;
-       size_t lsize;
-       size_t user_pattern_size = 0;
-       void *addr = NULL;
-       const struct rte_flow_expand_node *next = NULL;
-       struct rte_flow_item missed_item;
-       int missed = 0;
-       int elt = 0;
-       const struct rte_flow_item *last_item = NULL;
-
-       memset(&missed_item, 0, sizeof(missed_item));
-       lsize = offsetof(struct rte_flow_expand_rss, entry) +
-               elt_n * sizeof(buf->entry[0]);
-       if (lsize <= size) {
-               buf->entry[0].priority = 0;
-               buf->entry[0].pattern = (void *)&buf->entry[elt_n];
-               buf->entries = 0;
-               addr = buf->entry[0].pattern;
-       }
-       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
-               if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
-                       last_item = item;
-               for (i = 0; node->next && node->next[i]; ++i) {
-                       next = &graph[node->next[i]];
-                       if (next->type == item->type)
-                               break;
-               }
-               if (next)
-                       node = next;
-               user_pattern_size += sizeof(*item);
-       }
-       user_pattern_size += sizeof(*item); /* Handle END item. */
-       lsize += user_pattern_size;
-       /* Copy the user pattern in the first entry of the buffer. */
-       if (lsize <= size) {
-               rte_memcpy(addr, pattern, user_pattern_size);
-               addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-               buf->entries = 1;
-       }
-       /* Start expanding. */
-       memset(flow_items, 0, sizeof(flow_items));
-       user_pattern_size -= sizeof(*item);
-       /*
-        * Check if the last valid item has spec set
-        * and need complete pattern.
-        */
-       missed_item.type = rte_flow_expand_rss_item_complete(last_item);
-       if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
-               next = NULL;
-               missed = 1;
-               for (i = 0; node->next && node->next[i]; ++i) {
-                       next = &graph[node->next[i]];
-                       if (next->type == missed_item.type) {
-                               flow_items[0].type = missed_item.type;
-                               flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
-                               break;
-                       }
-                       next = NULL;
-               }
-       }
-       if (next && missed) {
-               elt = 2; /* missed item + item end. */
-               node = next;
-               lsize += elt * sizeof(*item) + user_pattern_size;
-               if ((node->rss_types & types) && lsize <= size) {
-                       buf->entry[buf->entries].priority = 1;
-                       buf->entry[buf->entries].pattern = addr;
-                       buf->entries++;
-                       rte_memcpy(addr, buf->entry[0].pattern,
-                                  user_pattern_size);
-                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
-                       addr = (void *)(((uintptr_t)addr) +
-                                       elt * sizeof(*item));
-               }
-       }
-       memset(flow_items, 0, sizeof(flow_items));
-       next_node = node->next;
-       stack[stack_pos] = next_node;
-       node = next_node ? &graph[*next_node] : NULL;
-       while (node) {
-               flow_items[stack_pos].type = node->type;
-               if (node->rss_types & types) {
-                       /*
-                        * compute the number of items to copy from the
-                        * expansion and copy it.
-                        * When the stack_pos is 0, there are 1 element in it,
-                        * plus the addition END item.
-                        */
-                       elt = stack_pos + 2;
-                       flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
-                       lsize += elt * sizeof(*item) + user_pattern_size;
-                       if (lsize <= size) {
-                               size_t n = elt * sizeof(*item);
-
-                               buf->entry[buf->entries].priority =
-                                       stack_pos + 1 + missed;
-                               buf->entry[buf->entries].pattern = addr;
-                               buf->entries++;
-                               rte_memcpy(addr, buf->entry[0].pattern,
-                                          user_pattern_size);
-                               addr = (void *)(((uintptr_t)addr) +
-                                               user_pattern_size);
-                               rte_memcpy(addr, &missed_item,
-                                          missed * sizeof(*item));
-                               addr = (void *)(((uintptr_t)addr) +
-                                       missed * sizeof(*item));
-                               rte_memcpy(addr, flow_items, n);
-                               addr = (void *)(((uintptr_t)addr) + n);
-                       }
-               }
-               /* Go deeper. */
-               if (node->next) {
-                       next_node = node->next;
-                       if (stack_pos++ == elt_n) {
-                               rte_errno = E2BIG;
-                               return -rte_errno;
-                       }
-                       stack[stack_pos] = next_node;
-               } else if (*(next_node + 1)) {
-                       /* Follow up with the next possibility. */
-                       ++next_node;
-               } else {
-                       /* Move to the next path. */
-                       if (stack_pos)
-                               next_node = stack[--stack_pos];
-                       next_node++;
-                       stack[stack_pos] = next_node;
-               }
-               node = *next_node ? &graph[*next_node] : NULL;
-       };
-       /* no expanded flows but we have missed item, create one rule for it */
-       if (buf->entries == 1 && missed != 0) {
-               elt = 2;
-               lsize += elt * sizeof(*item) + user_pattern_size;
-               if (lsize <= size) {
-                       buf->entry[buf->entries].priority = 1;
-                       buf->entry[buf->entries].pattern = addr;
-                       buf->entries++;
-                       flow_items[0].type = missed_item.type;
-                       flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
-                       rte_memcpy(addr, buf->entry[0].pattern,
-                                  user_pattern_size);
-                       addr = (void *)(((uintptr_t)addr) + user_pattern_size);
-                       rte_memcpy(addr, flow_items, elt * sizeof(*item));
-                       addr = (void *)(((uintptr_t)addr) +
-                                       elt * sizeof(*item));
-               }
-       }
-       return lsize;
-}
-
 int
 rte_flow_dev_dump(uint16_t port_id, FILE *file, struct rte_flow_error *error)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->dev_dump))
-               return flow_err(port_id, ops->dev_dump(dev, file, error),
-                               error);
+       if (likely(!!ops->dev_dump)) {
+               fts_enter(dev);
+               ret = ops->dev_dump(dev, file, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOSYS,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOSYS));
@@ -1241,12 +1021,16 @@ rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
 {
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+       int ret;
 
        if (unlikely(!ops))
                return -rte_errno;
-       if (likely(!!ops->get_aged_flows))
-               return flow_err(port_id, ops->get_aged_flows(dev, contexts,
-                               nb_contexts, error), error);
+       if (likely(!!ops->get_aged_flows)) {
+               fts_enter(dev);
+               ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
+               fts_exit(dev);
+               return flow_err(port_id, ret, error);
+       }
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, rte_strerror(ENOTSUP));
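
A minimal sketch of the opt-out path, assuming a driver that already
serializes its flow callbacks internally (the mydrv_* names are hypothetical
and not part of this patch):

    /* Hypothetical probe fragment: advertising the flag makes
     * fts_enter()/fts_exit() skip dev->data->flow_ops_mutex for this
     * port, because the driver's own locking already covers its
     * flow operations. */
    static int
    mydrv_probe(struct rte_eth_dev *eth_dev)
    {
            /* ... usual per-device initialization ... */
            eth_dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
            return 0;
    }

Without the flag, each rte_flow_validate/create/destroy/flush/query/isolate/
dev_dump/get_aged_flows call above holds the per-port mutex for the duration
of the driver callback.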