net/enic: extend flow director support for 1300 series
author     John Daley <johndale@cisco.com>
           Thu, 29 Sep 2016 20:56:39 +0000 (13:56 -0700)
committer  Bruce Richardson <bruce.richardson@intel.com>
           Thu, 13 Oct 2016 13:30:59 +0000 (15:30 +0200)
Cisco 1300 series adapter firmware version 2.0(13) for UCS
C-series servers and 3.1(2) for blade servers supports additional
filtering capabilities. The feature is enabled via Cisco
CIMC or UCSM with the 'advanced filters' radio button. When
enabled, these additional flow director flow types are available:
RTE_ETH_FLOW_NONFRAG_IPV4_OTHER
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP
RTE_ETH_FLOW_NONFRAG_IPV6_UDP
RTE_ETH_FLOW_NONFRAG_IPV6_TCP
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP
RTE_ETH_FLOW_NONFRAG_IPV6_OTHER

Changes:
- Detect and set an 'advanced filters' flag based on the adapter
  capability.
- Implement the RTE_ETH_FILTER_INFO filter op to report the flow types
  available, depending on whether advanced filters are enabled.
- Use a function pointer to select how filters are built for the adapter:
  copy_fltr_v1() for older firmware/adapters, or copy_fltr_v2() for
  adapters which support advanced filters (see the sketch below).
- Apply the fdir global masks to filters when in advanced filter mode.
- Update documentation.
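
In short, the add path becomes a capability-driven dispatch. A condensed
sketch of the mechanism (a summary of the per-file hunks below, not literal
patch code):

    /* at init: probe the firmware, then pick the copy function */
    enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
    enic->fdir.copy_fltr_fn = enic->adv_filters ? copy_fltr_v2
                                                : copy_fltr_v1;

    /* per filter add: convert, then hand to the classifier */
    enic->fdir.copy_fltr_fn(&fltr, &params->input,
                            &enic->rte_dev->data->dev_conf.fdir_conf.mask);
    vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr);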

Signed-off-by: John Daley <johndale@cisco.com>
Reviewed-by: Nelson Escobar <neescoba@cisco.com>
doc/guides/nics/enic.rst
doc/guides/nics/features/enic.ini
drivers/net/enic/base/vnic_dev.c
drivers/net/enic/base/vnic_dev.h
drivers/net/enic/enic.h
drivers/net/enic/enic_clsf.c
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_main.c
drivers/net/enic/enic_res.c

doc/guides/nics/enic.rst
index bff5c77..c535b58 100644
@@ -122,6 +122,24 @@ Configuration information
     uses this interrupt to get information about link status and errors
     in the fast path.
 
+.. _enic-flow-director:
+
+Flow director support
+---------------------
+
+Advanced filtering support was added to 1300 series VIC firmware starting
+with version 2.0(13) for C-series UCS servers and version 3.1(2) for
+UCSM-managed blade servers. To enable advanced filtering, select the
+'Advanced filter' radio button in CIMC or UCSM and then reboot the
+server.
+
+With advanced filters, flow director supports perfect matching of all
+fields of the IPv4 and IPv6 headers, as well as of the TCP, UDP and SCTP
+L4 headers. Masking of these fields for partial match is also supported.
+
+Without advanced filter support, flow director is limited to IPv4
+perfect filtering of the 5-tuple, with no masking of fields supported.
+
 Limitations
 -----------
 
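The masking described above is driven by the global flow director masks that
the application supplies at configure time; the PMD reads them back out of
dev_conf when a filter is added. A minimal sketch against the ethdev API of
this era (the mask values and comments are illustrative, not prescriptive):

    #include <stdint.h>
    #include <rte_ethdev.h>

    /* Global fdir masks, fixed at configure time (fields are big-endian). */
    static const struct rte_eth_conf port_conf = {
            .fdir_conf = {
                    .mode = RTE_FDIR_MODE_PERFECT,
                    .mask = {
                            /* match full IP addresses ... */
                            .ipv4_mask = {
                                    .src_ip = UINT32_MAX,
                                    .dst_ip = UINT32_MAX,
                            },
                            /* ... but only part of each L4 port */
                            .src_port_mask = 0x00ff,
                            .dst_port_mask = 0x00ff,
                    },
            },
    };
    /* later: rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf); */
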
@@ -145,6 +163,12 @@ Limitations
      vlan_offload |= ETH_VLAN_STRIP_OFFLOAD;
      rte_eth_dev_set_vlan_offload(port, vlan_offload);
 
+- Limited flow director support on 1200 series and 1300 series Cisco VIC
+  adapters with old firmware. Please see :ref:`enic-flow-director`.
+
+- Flow director features are not supported on generation 1 Cisco VIC
+  adapters (M81KR and P81E).
+
 How to build the suite?
 -----------------------
 The build instructions for the DPDK suite should be followed. By default
@@ -170,9 +194,6 @@ ENIC PMD supports all recent generations of Cisco VIC adapters including:
 - VIC 1385
 - VIC 1387
 
-- Flow director features are not supported on generation 1 Cisco VIC adapters
-   (M81KR and P81E)
-
 Supported Operating Systems
 ---------------------------
 Any Linux distribution fulfilling the conditions described in Dependencies
@@ -187,8 +208,7 @@ Supported features
 - IP checksum offload
 - Receive side VLAN stripping
 - Multiple receive and transmit queues
-- Flow Director ADD, UPDATE, DELETE, STATS operation support for IPV4 5-TUPLE
-  flows
+- Flow Director ADD, UPDATE, DELETE, STATS operations for IPv4 and IPv6 flows
 - Promiscuous mode
 - Setting RX VLAN (supported via UCSM/CIMC only)
 - VLAN filtering (supported via UCSM/CIMC only)
doc/guides/nics/features/enic.ini
index 7d3f801..523d4f1 100644
@@ -17,6 +17,7 @@ RSS hash             = Y
 VLAN filter          = Y
 CRC offload          = Y
 VLAN offload         = Y
+Flow director        = Y
 L3 checksum offload  = Y
 L4 checksum offload  = Y
 Packet type parsing  = Y
drivers/net/enic/base/vnic_dev.c
index 4db21a4..84e4840 100644
@@ -470,6 +470,18 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        }
 }
 
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+       u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+       int wait = 1000;
+       int err;
+
+       err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+       if (err)
+               return 0;
+       return (a1 >= (u32)FILTER_DPDK_1); /* a1 = max filter type supported */
+}
+
 static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
 {
        u64 a0 = (u32)cmd, a1 = 0;
@@ -1007,7 +1019,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  * @data: filter data
  */
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
-       struct filter *data)
+       struct filter_v2 *data)
 {
        u64 a0, a1;
        int wait = 1000;
@@ -1016,11 +1028,20 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
        struct filter_tlv *tlv, *tlv_va;
        struct filter_action *action;
        u64 tlv_size;
+       u32 filter_size;
        static unsigned int unique_id;
        char z_name[RTE_MEMZONE_NAMESIZE];
+       enum vnic_devcmd_cmd dev_cmd;
+
 
        if (cmd == CLSF_ADD) {
-               tlv_size = sizeof(struct filter) +
+               if (data->type == FILTER_DPDK_1)
+                       dev_cmd = CMD_ADD_ADV_FILTER;
+               else
+                       dev_cmd = CMD_ADD_FILTER;
+
+               filter_size = vnic_filter_size(data);
+               tlv_size = filter_size +
                    sizeof(struct filter_action) +
                    2*sizeof(struct filter_tlv);
                snprintf((char *)z_name, sizeof(z_name),
@@ -1034,12 +1055,12 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
                a1 = tlv_size;
                memset(tlv, 0, tlv_size);
                tlv->type = CLSF_TLV_FILTER;
-               tlv->length = sizeof(struct filter);
-               *(struct filter *)&tlv->val = *data;
+               tlv->length = filter_size;
+               memcpy(&tlv->val, (void *)data, filter_size);
 
                tlv = (struct filter_tlv *)((char *)tlv +
                                         sizeof(struct filter_tlv) +
-                                        sizeof(struct filter));
+                                        filter_size);
 
                tlv->type = CLSF_TLV_ACTION;
                tlv->length = sizeof(struct filter_action);
@@ -1047,7 +1068,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
                action->type = FILTER_ACTION_RQ_STEERING;
                action->u.rq_idx = *entry;
 
-               ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+               ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
                *entry = (u16)a0;
                vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
        } else if (cmd == CLSF_DEL) {
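
For reference, the CLSF_ADD request assembled above packs two TLVs back to
back in the DMA buffer whose address and size are passed in a0/a1. A sketch
of the layout (struct names are from the enic base code; the first TLV's
length varies because the filter_v2 size depends on the filter type):

    /*
     * offset 0:  struct filter_tlv { CLSF_TLV_FILTER, filter_size }
     * then:      filter_size bytes of struct filter_v2
     * then:      struct filter_tlv { CLSF_TLV_ACTION, sizeof(filter_action) }
     * then:      struct filter_action { FILTER_ACTION_RQ_STEERING, rq index }
     */
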
drivers/net/enic/base/vnic_dev.h
index 689442f..06ebd4c 100644
@@ -134,6 +134,7 @@ void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
 void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
 int vnic_dev_fw_info(struct vnic_dev *vdev,
        struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
 int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
 int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
        void *value);
@@ -201,7 +202,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
-       struct filter *data);
+       struct filter_v2 *data);
 #ifdef ENIC_VXLAN
 int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
        u8 overlay, u8 config);
drivers/net/enic/enic.h
index 17d6c05..4ea4e4a 100644
@@ -92,6 +92,11 @@ struct enic_fdir {
        struct rte_eth_fdir_stats stats;
        struct rte_hash *hash;
        struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+       u32 modes;
+       u32 types_mask;
+       void (*copy_fltr_fn)(struct filter_v2 *filt,
+                            struct rte_eth_fdir_input *input,
+                            struct rte_eth_fdir_masks *masks);
 };
 
 struct enic_soft_stats {
@@ -128,6 +133,7 @@ struct enic {
        int link_status;
        u8 hw_ip_checksum;
        u16 max_mtu;
+       u16 adv_filters;
 
        unsigned int flags;
        unsigned int priv_flags;
@@ -283,4 +289,11 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                               uint16_t nb_pkts);
 int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
 int enic_link_update(struct enic *enic);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+                 struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(struct filter_v2 *fltr,
+                  struct rte_eth_fdir_input *input,
+                  struct rte_eth_fdir_masks *masks);
 #endif /* _ENIC_H_ */
drivers/net/enic/enic_clsf.c
index 111b194..d2413d7 100644
 #include <rte_malloc.h>
 #include <rte_hash.h>
 #include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
 
 #include "enic_compat.h"
 #include "enic.h"
@@ -67,6 +72,262 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
        *stats = enic->fdir.stats;
 }
 
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+       info->mode = (enum rte_fdir_mode)enic->fdir.modes;
+       info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+       enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+       enic->fdir.types_mask  = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+                                1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+       if (enic->adv_filters) {
+               enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+                                        1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+                                        1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+                                        1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+                                        1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+                                        1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+               enic->fdir.copy_fltr_fn = copy_fltr_v2;
+       } else {
+               enic->fdir.copy_fltr_fn = copy_fltr_v1;
+       }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+              enum filter_generic_1_layer layer, void *mask, void *val,
+              unsigned int len)
+{
+       gp->mask_flags |= flag;
+       gp->val_flags |= gp->mask_flags;
+       memcpy(gp->layer[layer].mask, mask, len);
+       memcpy(gp->layer[layer].val, val, len);
+}
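
Each layer slot carries a (mask, val) byte-array pair. Conceptually the VIC
accepts a packet for a filter when, for every populated layer, the masked
packet bytes equal the value bytes; an illustration of that predicate (not
driver code, and the mask is applied to both operands for clarity):

    #include <stddef.h>
    #include <stdint.h>

    /* Conceptual per-layer match applied by the hardware (sketch only). */
    static int layer_matches(const uint8_t *pkt, const uint8_t *mask,
                             const uint8_t *val, size_t len)
    {
            size_t i;

            for (i = 0; i < len; i++)
                    if ((pkt[i] & mask[i]) != (val[i] & mask[i]))
                            return 0;
            return 1;
    }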
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support).
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+            __rte_unused struct rte_eth_fdir_masks *masks)
+{
+       fltr->type = FILTER_IPV4_5TUPLE;
+       fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+               input->flow.ip4_flow.src_ip);
+       fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+               input->flow.ip4_flow.dst_ip);
+       fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+               input->flow.udp4_flow.src_port);
+       fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+               input->flow.udp4_flow.dst_port);
+
+       if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+               fltr->u.ipv4.protocol = PROTO_TCP;
+       else
+               fltr->u.ipv4.protocol = PROTO_UDP;
+
+       fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
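
For orientation, a filter this v1 path can express is a fully specified
IPv4 5-tuple. A minimal application-side sketch against the ethdev filter
API of this era (function name, addresses, ports and queue are illustrative):

    #include <string.h>
    #include <rte_byteorder.h>
    #include <rte_ip.h>
    #include <rte_ethdev.h>

    static int add_udp4_5tuple_filter(uint8_t port_id)
    {
            struct rte_eth_fdir_filter f;

            memset(&f, 0, sizeof(f));
            f.soft_id = 1;
            f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
            f.input.flow.udp4_flow.ip.src_ip =
                    rte_cpu_to_be_32(IPv4(192, 168, 1, 2));
            f.input.flow.udp4_flow.ip.dst_ip =
                    rte_cpu_to_be_32(IPv4(192, 168, 1, 1));
            f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(53);
            f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(1024);
            f.action.rx_queue = 4;
            f.action.behavior = RTE_ETH_FDIR_ACCEPT;

            return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                           RTE_ETH_FILTER_ADD, &f);
    }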
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support).
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+            struct rte_eth_fdir_masks *masks)
+{
+       struct filter_generic_1 *gp = &fltr->u.generic_1;
+       int i;
+
+       fltr->type = FILTER_DPDK_1;
+       memset(gp, 0, sizeof(*gp));
+
+       if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+               struct udp_hdr udp_mask, udp_val;
+               memset(&udp_mask, 0, sizeof(udp_mask));
+               memset(&udp_val, 0, sizeof(udp_val));
+
+               if (input->flow.udp4_flow.src_port) {
+                       udp_mask.src_port = masks->src_port_mask;
+                       udp_val.src_port = input->flow.udp4_flow.src_port;
+               }
+               if (input->flow.udp4_flow.dst_port) {
+                       udp_mask.dst_port = masks->dst_port_mask;
+                       udp_val.dst_port = input->flow.udp4_flow.dst_port;
+               }
+
+               enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+                              &udp_mask, &udp_val, sizeof(struct udp_hdr));
+       } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+               struct tcp_hdr tcp_mask, tcp_val;
+               memset(&tcp_mask, 0, sizeof(tcp_mask));
+               memset(&tcp_val, 0, sizeof(tcp_val));
+
+               if (input->flow.tcp4_flow.src_port) {
+                       tcp_mask.src_port = masks->src_port_mask;
+                       tcp_val.src_port = input->flow.tcp4_flow.src_port;
+               }
+               if (input->flow.tcp4_flow.dst_port) {
+                       tcp_mask.dst_port = masks->dst_port_mask;
+                       tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+               }
+
+               enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+                              &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+       } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+               struct sctp_hdr sctp_mask, sctp_val;
+               memset(&sctp_mask, 0, sizeof(sctp_mask));
+               memset(&sctp_val, 0, sizeof(sctp_val));
+
+               if (input->flow.sctp4_flow.src_port) {
+                       sctp_mask.src_port = masks->src_port_mask;
+                       sctp_val.src_port = input->flow.sctp4_flow.src_port;
+               }
+               if (input->flow.sctp4_flow.dst_port) {
+                       sctp_mask.dst_port = masks->dst_port_mask;
+                       sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+               }
+               if (input->flow.sctp4_flow.verify_tag) {
+                       sctp_mask.tag = 0xffffffff;
+                       sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+               }
+
+               /* SCTP is IP protocol 132; override ip4_flow.proto */
+               input->flow.ip4_flow.proto = 132;
+
+               enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+                              &sctp_val, sizeof(struct sctp_hdr));
+       }
+
+       if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+               struct ipv4_hdr ip4_mask, ip4_val;
+               memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+               memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+               if (input->flow.ip4_flow.tos) {
+                       ip4_mask.type_of_service = 0xff;
+                       ip4_val.type_of_service = input->flow.ip4_flow.tos;
+               }
+               if (input->flow.ip4_flow.ttl) {
+                       ip4_mask.time_to_live = 0xff;
+                       ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+               }
+               if (input->flow.ip4_flow.proto) {
+                       ip4_mask.next_proto_id = 0xff;
+                       ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+               }
+               if (input->flow.ip4_flow.src_ip) {
+                       ip4_mask.src_addr =  masks->ipv4_mask.src_ip;
+                       ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+               }
+               if (input->flow.ip4_flow.dst_ip) {
+                       ip4_mask.dst_addr =  masks->ipv4_mask.dst_ip;
+                       ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+               }
+
+               enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+                              &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+       }
+
+       if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+               struct udp_hdr udp_mask, udp_val;
+               memset(&udp_mask, 0, sizeof(udp_mask));
+               memset(&udp_val, 0, sizeof(udp_val));
+
+               if (input->flow.udp6_flow.src_port) {
+                       udp_mask.src_port = masks->src_port_mask;
+                       udp_val.src_port = input->flow.udp6_flow.src_port;
+               }
+               if (input->flow.udp6_flow.dst_port) {
+                       udp_mask.dst_port = masks->dst_port_mask;
+                       udp_val.dst_port = input->flow.udp6_flow.dst_port;
+               }
+               enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+                              &udp_mask, &udp_val, sizeof(struct udp_hdr));
+       } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+               struct tcp_hdr tcp_mask, tcp_val;
+               memset(&tcp_mask, 0, sizeof(tcp_mask));
+               memset(&tcp_val, 0, sizeof(tcp_val));
+
+               if (input->flow.tcp6_flow.src_port) {
+                       tcp_mask.src_port = masks->src_port_mask;
+                       tcp_val.src_port = input->flow.tcp6_flow.src_port;
+               }
+               if (input->flow.tcp6_flow.dst_port) {
+                       tcp_mask.dst_port = masks->dst_port_mask;
+                       tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+               }
+               enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+                              &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+       } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+               struct sctp_hdr sctp_mask, sctp_val;
+               memset(&sctp_mask, 0, sizeof(sctp_mask));
+               memset(&sctp_val, 0, sizeof(sctp_val));
+
+               if (input->flow.sctp6_flow.src_port) {
+                       sctp_mask.src_port = masks->src_port_mask;
+                       sctp_val.src_port = input->flow.sctp6_flow.src_port;
+               }
+               if (input->flow.sctp6_flow.dst_port) {
+                       sctp_mask.dst_port = masks->dst_port_mask;
+                       sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+               }
+               if (input->flow.sctp6_flow.verify_tag) {
+                       sctp_mask.tag = 0xffffffff;
+                       sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+               }
+
+               /* SCTP is IP protocol 132; override ipv6_flow.proto */
+               input->flow.ipv6_flow.proto = 132;
+
+               enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+                              &sctp_val, sizeof(struct sctp_hdr));
+       }
+
+       if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+           input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+               struct ipv6_hdr ipv6_mask, ipv6_val;
+               memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+               memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+               if (input->flow.ipv6_flow.proto) {
+                       ipv6_mask.proto = 0xff;
+                       ipv6_val.proto = input->flow.ipv6_flow.proto;
+               }
+               for (i = 0; i < 4; i++) {
+                       *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
+                                       masks->ipv6_mask.src_ip[i];
+                       *(uint32_t *)&ipv6_val.src_addr[i * 4] =
+                                       input->flow.ipv6_flow.src_ip[i];
+               }
+               for (i = 0; i < 4; i++) {
+                       *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
+                                       masks->ipv6_mask.dst_ip[i];
+                       *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
+                                       input->flow.ipv6_flow.dst_ip[i];
+               }
+               if (input->flow.ipv6_flow.tc) {
+                       ipv6_mask.vtc_flow = 0x00ff0000;
+                       ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 16;
+               }
+               if (input->flow.ipv6_flow.hop_limits) {
+                       ipv6_mask.hop_limits = 0xff;
+                       ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+               }
+
+               enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+                              &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+       }
+}
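
Note how zero-valued input fields are simply skipped above, leaving their
mask bytes zero so they match anything. For example, a hypothetical
"any IPv4 protocol to one host" filter that only this v2 path can express
(soft_id, address and queue are illustrative):

    struct rte_eth_fdir_filter f;

    memset(&f, 0, sizeof(f));
    f.soft_id = 2;
    f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
    f.input.flow.ip4_flow.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 9));
    /* src_ip, tos, ttl and proto stay 0 and are left unmasked */
    f.action.rx_queue = 1;
    f.action.behavior = RTE_ETH_FDIR_ACCEPT;
    /* then: rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
     *                               RTE_ETH_FILTER_ADD, &f); */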
+
 int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 {
        int32_t pos;
@@ -97,7 +358,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 {
        struct enic_fdir_node *key;
-       struct filter fltr = {0};
+       struct filter_v2 fltr;
        int32_t pos;
        u8 do_free = 0;
        u16 old_fltr_id = 0;
@@ -105,9 +366,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
        u16 flex_bytes;
        u16 queue;
 
-       flowtype_supported = (
-               (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
-               (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+       memset(&fltr, 0, sizeof(fltr));
+       flowtype_supported = enic->fdir.types_mask
+                            & (1 << params->input.flow_type);
 
        flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                (params->input.flow_ext.flexbytes[0] & 0xFF));
@@ -123,6 +384,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
        /* Get the enicpmd RQ from the DPDK Rx queue */
        queue = enic_sop_rq(params->action.rx_queue);
 
+       if (!enic->rq[queue].in_use)
+               return -EINVAL;
+
        /* See if the key is already there in the table */
        pos = rte_hash_del_key(enic->fdir.hash, params);
        switch (pos) {
@@ -185,22 +449,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
        key->filter = *params;
        key->rq_index = queue;
 
-       fltr.type = FILTER_IPV4_5TUPLE;
-       fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
-               params->input.flow.ip4_flow.src_ip);
-       fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
-               params->input.flow.ip4_flow.dst_ip);
-       fltr.u.ipv4.src_port = rte_be_to_cpu_16(
-               params->input.flow.udp4_flow.src_port);
-       fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
-               params->input.flow.udp4_flow.dst_port);
-
-       if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
-               fltr.u.ipv4.protocol = PROTO_TCP;
-       else
-               fltr.u.ipv4.protocol = PROTO_UDP;
-
-       fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+       enic->fdir.copy_fltr_fn(&fltr, &params->input,
+                               &enic->rte_dev->data->dev_conf.fdir_conf.mask);
 
        if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
                key->fltr_id = queue;
drivers/net/enic/enic_ethdev.c
index 82dc265..4d24bbd 100644
@@ -95,10 +95,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
                break;
 
        case RTE_ETH_FILTER_FLUSH:
-       case RTE_ETH_FILTER_INFO:
                dev_warning(enic, "unsupported operation %u", filter_op);
                ret = -ENOTSUP;
                break;
+       case RTE_ETH_FILTER_INFO:
+               enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+               break;
        default:
                dev_err(enic, "unknown operation %u", filter_op);
                ret = -EINVAL;
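
With the INFO op wired up, an application can query which flow types the
port accepts before attempting an add. A minimal sketch (port_id and the
flow type tested are illustrative):

    struct rte_eth_fdir_info info;

    memset(&info, 0, sizeof(info));
    if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                RTE_ETH_FILTER_INFO, &info) == 0 &&
        (info.flow_types_mask[0] & (1U << RTE_ETH_FLOW_NONFRAG_IPV6_TCP)))
            printf("port %u: IPv6 TCP flow director filters supported\n",
                   port_id);
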
drivers/net/enic/enic_main.c
index e3e58fb..622b317 100644
@@ -1293,6 +1293,9 @@ static int enic_dev_init(struct enic *enic)
                return -EINVAL;
        }
 
+       /* Get the supported filters */
+       enic_fdir_info(enic);
+
        eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
        if (!eth_dev->data->mac_addrs) {
                dev_err(enic, "mac addr storage alloc failed, aborting.\n");
drivers/net/enic/enic_res.c
index 84c5d33..8a230a1 100644
@@ -62,6 +62,7 @@ int enic_get_vnic_config(struct enic *enic)
                return err;
        }
 
+
 #define GET_CONFIG(m) \
        do { \
                err = vnic_dev_spec(enic->vdev, \
@@ -98,6 +99,10 @@ int enic_get_vnic_config(struct enic *enic)
        enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
                                         max_t(u16, ENIC_MIN_MTU, c->mtu));
 
+       enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+       dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+                ? "" : "not "));
+
        c->wq_desc_count =
                min_t(u32, ENIC_MAX_WQ_DESCS,
                max_t(u32, ENIC_MIN_WQ_DESCS,