net/sfc: support flow API filters
authorRoman Zhukov <roman.zhukov@oktetlabs.ru>
Thu, 9 Mar 2017 15:26:27 +0000 (15:26 +0000)
committerFerruh Yigit <ferruh.yigit@intel.com>
Tue, 4 Apr 2017 16:57:21 +0000 (18:57 +0200)
Only pattern items VOID, ETH and actions VOID, QUEUE are now
supported.

Signed-off-by: Roman Zhukov <roman.zhukov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
doc/guides/nics/features/sfc_efx.ini
doc/guides/nics/sfc_efx.rst
doc/guides/rel_notes/release_17_05.rst
drivers/net/sfc/Makefile
drivers/net/sfc/sfc.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/sfc/sfc_filter.h
drivers/net/sfc/sfc_flow.c [new file with mode: 0644]
drivers/net/sfc/sfc_flow.h [new file with mode: 0644]

index 3a15baa..bb60ad6 100644 (file)
@@ -19,6 +19,7 @@ RSS hash             = Y
 RSS key update       = Y
 RSS reta update      = Y
 Flow control         = Y
+Flow API             = Y
 VLAN offload         = P
 L3 checksum offload  = Y
 L4 checksum offload  = Y
index 0a05a0a..f2e410f 100644 (file)
@@ -81,6 +81,8 @@ SFC EFX PMD has support for:
 
 - Transmit VLAN insertion (if running firmware variant supports it)
 
+- Flow API
+
 
 Non-supported Features
 ----------------------
@@ -114,6 +116,28 @@ required in the receive buffer.
 It should be taken into account when mbuf pool for receive is created.
 
 
+Flow API support
+----------------
+
+Supported attributes:
+
+- Ingress
+
+Supported pattern items:
+
+- VOID
+
+- ETH (exact match of source/destination addresses, EtherType)
+
+Supported actions:
+
+- VOID
+
+- QUEUE
+
+Validating flow rules depends on the firmware variant.
+
+
 Supported NICs
 --------------
 
index fa0da09..5acb4c2 100644 (file)
@@ -84,6 +84,11 @@ New Features
   Added support for Hardware TSO for tunneled and non-tunneled packets.
   Tunneling protocols supported are GRE and VXLAN.
 
+* **Updated the sfc_efx driver.**
+
+  * Generic flow API support for Ethernet, VLAN, IPv4, IPv6, UDP and TCP
+    pattern items with QUEUE action for ingress traffic.
+
 * **Added vmxnet3 version 3 support.**
 
   Added support for vmxnet3 version 3 which includes several
index 423e32e..7fe49d1 100644 (file)
@@ -91,6 +91,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
 
 VPATH += $(SRCDIR)/base
 
index 6fd8bf1..3e419b6 100644 (file)
@@ -320,10 +320,17 @@ sfc_start(struct sfc_adapter *sa)
        if (rc != 0)
                goto fail_tx_start;
 
+       rc = sfc_flow_start(sa);
+       if (rc != 0)
+               goto fail_flows_insert;
+
        sa->state = SFC_ADAPTER_STARTED;
        sfc_log_init(sa, "done");
        return 0;
 
+fail_flows_insert:
+       sfc_tx_stop(sa);
+
 fail_tx_start:
        sfc_rx_stop(sa);
 
@@ -368,6 +375,7 @@ sfc_stop(struct sfc_adapter *sa)
 
        sa->state = SFC_ADAPTER_STOPPING;
 
+       sfc_flow_stop(sa);
        sfc_tx_stop(sa);
        sfc_rx_stop(sa);
        sfc_port_stop(sa);
@@ -640,6 +648,8 @@ sfc_attach(struct sfc_adapter *sa)
        sfc_log_init(sa, "fini nic");
        efx_nic_fini(enp);
 
+       sfc_flow_init(sa);
+
        sa->state = SFC_ADAPTER_INITIALIZED;
 
        sfc_log_init(sa, "done");
@@ -698,5 +708,6 @@ sfc_detach(struct sfc_adapter *sa)
 
        sfc_mem_bar_fini(sa);
 
+       sfc_flow_fini(sa);
        sa->state = SFC_ADAPTER_UNINITIALIZED;
 }
index e696dd2..5297159 100644 (file)
@@ -40,7 +40,7 @@
 #include "sfc_ev.h"
 #include "sfc_rx.h"
 #include "sfc_tx.h"
-
+#include "sfc_flow.h"
 
 static void
 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
@@ -1210,8 +1210,8 @@ bad_reta_entry:
 
 static int
 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-                   __rte_unused enum rte_filter_op filter_op,
-                   __rte_unused void *arg)
+                   enum rte_filter_op filter_op,
+                   void *arg)
 {
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc = ENOTSUP;
@@ -1246,6 +1246,14 @@ sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
        case RTE_ETH_FILTER_HASH:
                sfc_err(sa, "Hash filters not supported");
                break;
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET) {
+                       rc = EINVAL;
+               } else {
+                       *(const void **)arg = &sfc_flow_ops;
+                       rc = 0;
+               }
+               break;
        default:
                sfc_err(sa, "Unknown filter type %u", filter_type);
                break;
index 5ba1617..fe23eee 100644 (file)
@@ -32,6 +32,8 @@
 
 #include "efx.h"
 
+#include "sfc_flow.h"
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -41,6 +43,8 @@ struct sfc_filter {
        size_t                          supported_match_num;
        /** Driver cache of supported filter match masks */
        uint32_t                        *supported_match;
+       /** List of flow rules */
+       struct sfc_flow_list            flow_list;
 };
 
 struct sfc_adapter;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
new file mode 100644 (file)
index 0000000..6b20bae
--- /dev/null
@@ -0,0 +1,704 @@
+/*-
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_tailq.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_filter.h"
+#include "sfc_flow.h"
+#include "sfc_log.h"
+
+/*
+ * At now flow API is implemented in such a manner that each
+ * flow rule is converted to a hardware filter.
+ * All elements of flow rule (attributes, pattern items, actions)
+ * correspond to one or more fields in the efx_filter_spec_s structure
+ * that is responsible for the hardware filter.
+ */
+
/*
 * Protocol layer ordering used to validate the sequence of pattern
 * items: each supported item declares the layer it represents and the
 * layer that must precede it (see sfc_flow_parse_pattern()).
 */
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,	/* No ordering constraint (e.g. VOID) */
	SFC_FLOW_ITEM_START_LAYER,	/* Start of the pattern */
	SFC_FLOW_ITEM_L2,		/* Link layer (Ethernet) */
};

/*
 * Parse one pattern item into the EFX filter specification.
 * Returns 0 on success, negative errno (via rte_flow_error_set) on failure.
 */
typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

/* Descriptor of one supported pattern item type */
struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};
+
+static sfc_flow_item_parse sfc_flow_parse_void;
+static sfc_flow_item_parse sfc_flow_parse_eth;
+
+static boolean_t
+sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
+{
+       uint8_t sum = 0;
+       unsigned int i;
+
+       for (i = 0; i < size; i++)
+               sum |= buf[i];
+
+       return (sum == 0) ? B_TRUE : B_FALSE;
+}
+
/*
 * Validate a pattern item and extract its spec and mask pointers for
 * the protocol-specific parsers.
 *
 * On success *spec_ptr/*mask_ptr point into the item (or to def_mask
 * when the item carries no mask). *spec_ptr may be NULL, meaning
 * "match anything" for this item. Ranging via item->last is rejected
 * unless "last" is all-zero or equal to "spec".
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	/* "last"/"mask" are meaningless without a "spec" to apply them to */
	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				"Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	/* No spec: nothing to validate against the supported mask */
	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec don't ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		/* Any bit set in spec/mask but clear in supp_mask is refused */
		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}
+
+/*
+ * Protocol parsers.
+ * Masking is not supported, so masks in items should be either
+ * full or empty (zeroed) and set only for supported fields which
+ * are specified in the supp_mask.
+ */
+
/* VOID items match nothing and contribute nothing to the filter spec */
static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}
+
/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param[in] item
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param[in, out] efx_spec
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	/* Only full match on dst/src MAC and EtherType is supported */
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	/* Per-field masks must be either all-ones (match) or all-zero */
	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}
+
/* Table of supported pattern items, looked up by sfc_flow_get_item() */
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
};
+
+/*
+ * Protocol-independent flow API support
+ */
/*
 * Validate flow rule attributes and reflect them in the filter spec.
 * Only ingress rules in the default group with default priority are
 * supported; the check order determines which error is reported first.
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	/* Ingress rule: receive filter with the default RSS context */
	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}
+
+/* Get item from array sfc_flow_items */
+static const struct sfc_flow_item *
+sfc_flow_get_item(enum rte_flow_item_type type)
+{
+       unsigned int i;
+
+       for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
+               if (sfc_flow_items[i].type == type)
+                       return &sfc_flow_items[i];
+
+       return NULL;
+}
+
+static int
+sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
+                      struct rte_flow *flow,
+                      struct rte_flow_error *error)
+{
+       int rc;
+       unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+       const struct sfc_flow_item *item;
+
+       if (pattern == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+                                  "NULL pattern");
+               return -rte_errno;
+       }
+
+       for (; pattern != NULL &&
+              pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+               item = sfc_flow_get_item(pattern->type);
+               if (item == NULL) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+                                          "Unsupported pattern item");
+                       return -rte_errno;
+               }
+
+               /*
+                * Omitting one or several protocol layers at the beginning
+                * of pattern is supported
+                */
+               if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+                   prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+                   item->prev_layer != prev_layer) {
+                       rte_flow_error_set(error, ENOTSUP,
+                                          RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+                                          "Unexpected sequence of pattern items");
+                       return -rte_errno;
+               }
+
+               rc = item->parse(pattern, &flow->spec, error);
+               if (rc != 0)
+                       return rc;
+
+               if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
+                       prev_layer = item->layer;
+       }
+
+       if (pattern == NULL) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+                                  "NULL item");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
+static int
+sfc_flow_parse_queue(struct sfc_adapter *sa,
+                    const struct rte_flow_action_queue *queue,
+                    struct rte_flow *flow)
+{
+       struct sfc_rxq *rxq;
+
+       if (queue->index >= sa->rxq_count)
+               return -EINVAL;
+
+       rxq = sa->rxq_info[queue->index].rxq;
+       flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+
+       return 0;
+}
+
/*
 * Parse the action array into the flow's filter spec.
 * VOID actions are skipped; exactly the QUEUE action is meaningful and
 * at least one fate-deciding action must be present.
 */
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	/* Tracks whether any fate-deciding (QUEUE) action was seen */
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	/* A rule without a QUEUE action has no defined fate */
	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
+
/*
 * Parse attributes, pattern and actions of a flow rule into "flow".
 * Used both for validation (stack-allocated flow) and creation.
 * Returns 0 on success, a negative errno value otherwise.
 */
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	/* Reject match combinations the firmware variant cannot back */
	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

	/* On success rc is 0 and falls through the label deliberately */
fail_bad_value:
	return rc;
}
+
+static int
+sfc_flow_validate(struct rte_eth_dev *dev,
+                 const struct rte_flow_attr *attr,
+                 const struct rte_flow_item pattern[],
+                 const struct rte_flow_action actions[],
+                 struct rte_flow_error *error)
+{
+       struct rte_flow flow;
+
+       return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_create(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = dev->data->dev_private;
+       struct rte_flow *flow = NULL;
+       int rc;
+
+       flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+       if (flow == NULL) {
+               rte_flow_error_set(error, ENOMEM,
+                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                  "Failed to allocate memory");
+               goto fail_no_mem;
+       }
+
+       rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+       if (rc != 0)
+               goto fail_bad_value;
+
+       TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
+
+       sfc_adapter_lock(sa);
+
+       if (sa->state == SFC_ADAPTER_STARTED) {
+               rc = efx_filter_insert(sa->nic, &flow->spec);
+               if (rc != 0) {
+                       rte_flow_error_set(error, rc,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                               "Failed to insert filter");
+                       goto fail_filter_insert;
+               }
+       }
+
+       sfc_adapter_unlock(sa);
+
+       return flow;
+
+fail_filter_insert:
+       TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+
+fail_bad_value:
+       rte_free(flow);
+       sfc_adapter_unlock(sa);
+
+fail_no_mem:
+       return NULL;
+}
+
/*
 * Unlink a flow from the list and free it, removing the hardware
 * filter first when the adapter is started. Caller must hold the
 * adapter lock. Returns 0 or a positive errno from efx_filter_remove().
 */
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	/* The software state is released even if hardware removal failed */
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}
+
+static int
+sfc_flow_destroy(struct rte_eth_dev *dev,
+                struct rte_flow *flow,
+                struct rte_flow_error *error)
+{
+       struct sfc_adapter *sa = dev->data->dev_private;
+       struct rte_flow *flow_ptr;
+       int rc = EINVAL;
+
+       sfc_adapter_lock(sa);
+
+       TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+               if (flow_ptr == flow)
+                       rc = 0;
+       }
+       if (rc != 0) {
+               rte_flow_error_set(error, rc,
+                                  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                                  "Failed to find flow rule to destroy");
+               goto fail_bad_value;
+       }
+
+       rc = sfc_flow_remove(sa, flow, error);
+
+fail_bad_value:
+       sfc_adapter_unlock(sa);
+
+       return -rc;
+}
+
/*
 * rte_flow_ops .flush: destroy all flow rules of the port.
 * Continues past individual failures and reports the last error code.
 */
static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}
+
/* Flow API ops handed out via RTE_ETH_FILTER_GENERIC; query unsupported */
const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
};
+
/* Initialize the (empty) flow rule list; called from sfc_attach() */
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}
+
/*
 * Free all flow rule bookkeeping; called from sfc_detach().
 * Hardware filters are not touched here — the adapter is already
 * stopped (sfc_flow_stop() removed them).
 */
void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}
+
/*
 * Remove all hardware filters, keeping the software flow list intact
 * so sfc_flow_start() can re-insert them; called from sfc_stop().
 */
void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}
+
+int
+sfc_flow_start(struct sfc_adapter *sa)
+{
+       struct rte_flow *flow;
+       int rc = 0;
+
+       sfc_log_init(sa, "entry");
+
+       SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+       TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
+               rc = efx_filter_insert(sa->nic, &flow->spec);
+               if (rc != 0)
+                       goto fail_bad_flow;
+       }
+
+       sfc_log_init(sa, "done");
+
+fail_bad_flow:
+       return rc;
+}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
new file mode 100644 (file)
index 0000000..d38ac35
--- /dev/null
@@ -0,0 +1,62 @@
+/*-
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifndef _SFC_FLOW_H
#define _SFC_FLOW_H

#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "efx.h"

#ifdef __cplusplus
extern "C" {
#endif

/* PMD-specific definition of the opaque type from rte_flow.h */
struct rte_flow {
	efx_filter_spec_t spec;		/* filter specification */
	TAILQ_ENTRY(rte_flow) entries;	/* flow list entries */
};

/* Head of the per-adapter flow rule list (struct sfc_filter) */
TAILQ_HEAD(sfc_flow_list, rte_flow);

/* Flow API ops returned for RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET */
extern const struct rte_flow_ops sfc_flow_ops;

struct sfc_adapter;

/* Lifecycle hooks; all expect the adapter lock to be held by caller */
void sfc_flow_init(struct sfc_adapter *sa);
void sfc_flow_fini(struct sfc_adapter *sa);
int sfc_flow_start(struct sfc_adapter *sa);
void sfc_flow_stop(struct sfc_adapter *sa);

#ifdef __cplusplus
}
#endif
#endif /* _SFC_FLOW_H */