2 * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
5 * Copyright (c) 2014, Cisco Systems, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in
17 * the documentation and/or other materials provided with the
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
41 #include <rte_byteorder.h>
43 #include "enic_compat.h"
45 #include "wq_enet_desc.h"
46 #include "rq_enet_desc.h"
47 #include "cq_enet_desc.h"
48 #include "vnic_enet.h"
53 #include "vnic_intr.h"
56 #ifdef RTE_MACHINE_CPUFLAG_SSE4_2
57 #include <rte_hash_crc.h>
58 #define DEFAULT_HASH_FUNC rte_hash_crc
60 #include <rte_jhash.h>
61 #define DEFAULT_HASH_FUNC rte_jhash
65 #define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
66 #define ENICPMD_CLSF_BUCKET_ENTRIES 4
/*
 * enic_fdir_del_fltr() - remove a flow-director filter.
 *
 * Uses rte_hash_del_key() both as lookup and removal: on success it returns
 * the slot index previously occupied by the key, which doubles as the index
 * into enic->fdir.nodes[].  The hardware classifier entry is then deleted
 * via vnic_dev_classifier(CLSF_DEL) and the node slot is released.
 *
 * Stats: f_remove counts failed removals, remove counts successful ones,
 * free tracks available classifier slots (incremented here on success).
 *
 * NOTE(review): this view of the file is elided (original line numbering is
 * non-contiguous) — the declaration of `pos`, the negative-pos error check,
 * the node free, and the return statements are not visible here.
 */
68 int enic_fdir_del_fltr(struct enic *enic, struct rte_fdir_filter *params)
71 	struct enic_fdir_node *key;
72 	/* See if the key is in the table */
73 	pos = rte_hash_del_key(enic->fdir.hash, params);
77 	enic->fdir.stats.f_remove++;
80 	/* The entry is present in the table */
81 	key = enic->fdir.nodes[pos];
83 	/* Delete the filter */
84 	vnic_dev_classifier(enic->vdev, CLSF_DEL,
87 	enic->fdir.nodes[pos] = NULL;
88 	enic->fdir.stats.free++;
89 	enic->fdir.stats.remove++;
/*
 * enic_fdir_add_fltr() - add or update a flow-director filter steering
 * matching IPv4 TCP/UDP 5-tuple traffic to a receive queue.
 *
 * Rejects unsupported requests up front (no hash table, VLAN match, missing
 * L4 type, IPv6, SCTP, flex bytes, or drop action), bumping stats.f_add
 * (the failed-add counter) on every rejection path.
 *
 * rte_hash_del_key() is used as a combined lookup-and-remove: if the key is
 * already present it returns the old slot index (the key is re-added at the
 * end); otherwise this is a brand-new entry.  For an update whose queue is
 * unchanged, the key is simply re-inserted and the call is a no-op.  When the
 * queue changes and a free classifier slot exists, the new hardware entry is
 * added before the old one is deleted, so matching packets never fall through
 * to the default queue during the switch-over.
 *
 * NOTE(review): elided view — declarations of `pos`/`old_fltr_id`, several
 * if/else braces, returns, and error-path cleanup are not visible here.
 */
95 int enic_fdir_add_fltr(struct enic *enic, struct rte_fdir_filter *params,
98 	struct enic_fdir_node *key;
99 	struct filter fltr = {.type = 0};
104 	if (!enic->fdir.hash || params->vlan_id || !params->l4type ||
105 	    (RTE_FDIR_IPTYPE_IPV6 == params->iptype) ||
106 	    (RTE_FDIR_L4TYPE_SCTP == params->l4type) ||
107 	    params->flex_bytes || drop) {
108 		enic->fdir.stats.f_add++;
112 	/* See if the key is already there in the table */
113 	pos = rte_hash_del_key(enic->fdir.hash, params);
116 		enic->fdir.stats.f_add++;
119 		/* Add a new classifier entry */
120 		if (!enic->fdir.stats.free) {
121 			enic->fdir.stats.f_add++;
124 		key = (struct enic_fdir_node *)rte_zmalloc(
126 			sizeof(struct enic_fdir_node), 0);
128 			enic->fdir.stats.f_add++;
133 		/* The entry is already present in the table.
134 		 * Check if there is a change in queue
136 		key = enic->fdir.nodes[pos];
137 		enic->fdir.nodes[pos] = NULL;
138 		if (unlikely(key->rq_index == queue)) {
139 			/* Nothing to be done */
140 			pos = rte_hash_add_key(enic->fdir.hash, params);
141 			enic->fdir.nodes[pos] = key;
142 			enic->fdir.stats.f_add++;
144 				"FDIR rule is already present\n");
148 		if (likely(enic->fdir.stats.free)) {
149 			/* Add the filter and then delete the old one.
150 			 * This is to avoid packets from going into the
151 			 * default queue during the window between
155 			old_fltr_id = key->fltr_id;
157 			/* No free slots in the classifier.
158 			 * Delete the filter and add the modified one later
160 			vnic_dev_classifier(enic->vdev, CLSF_DEL,
161 				&key->fltr_id, NULL);
162 			enic->fdir.stats.free++;
168 	key->filter = *params;
169 	key->rq_index = queue;
171 	fltr.type = FILTER_IPV4_5TUPLE;
	/* Hardware filter fields are host byte order; rte_fdir_filter
	 * carries them big-endian, hence the be_to_cpu conversions. */
172 	fltr.u.ipv4.src_addr = rte_be_to_cpu_32(params->ip_src.ipv4_addr);
173 	fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(params->ip_dst.ipv4_addr);
174 	fltr.u.ipv4.src_port = rte_be_to_cpu_16(params->port_src);
175 	fltr.u.ipv4.dst_port = rte_be_to_cpu_16(params->port_dst);
177 	if (RTE_FDIR_L4TYPE_TCP == params->l4type)
178 		fltr.u.ipv4.protocol = PROTO_TCP;
180 		fltr.u.ipv4.protocol = PROTO_UDP;
182 	fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
	/* CLSF_ADD takes the target queue in/out: on success the device
	 * writes the assigned filter id back into `queue`. */
184 	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
185 		key->fltr_id = queue;
187 		dev_err(enic, "Add classifier entry failed\n");
188 		enic->fdir.stats.f_add++;
	/* New entry is live; now retire the superseded hardware filter. */
194 		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
196 	enic->fdir.stats.free--;
197 	enic->fdir.stats.add++;
200 	pos = rte_hash_add_key(enic->fdir.hash, (void *)key);
201 	enic->fdir.nodes[pos] = key;
/*
 * enic_clsf_destroy() - tear down all flow-director state.
 *
 * Walks every node slot, deletes the corresponding hardware classifier
 * entry via vnic_dev_classifier(CLSF_DEL), then frees the lookup hash.
 *
 * NOTE(review): elided view — the per-slot NULL check, the rte_free of each
 * node, and closing braces are not visible here; original numbering skips
 * lines 212 and 215-218.
 */
205 void enic_clsf_destroy(struct enic *enic)
208 	struct enic_fdir_node *key;
209 	/* delete classifier entries */
210 	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
211 		key = enic->fdir.nodes[index];
213 			vnic_dev_classifier(enic->vdev, CLSF_DEL,
214 				&key->fltr_id, NULL);
219 	if (enic->fdir.hash) {
220 		rte_hash_free(enic->fdir.hash);
			/* Avoid dangling pointer / double free on re-entry. */
221 		enic->fdir.hash = NULL;
/*
 * enic_clsf_init() - initialize flow-director classifier state.
 *
 * Creates the key->node lookup hash (CRC hash when SSE4.2 is available,
 * jhash otherwise — see DEFAULT_HASH_FUNC above), zeroes the stats, and
 * seeds the free-slot counter with the full classifier capacity.
 *
 * Return: 0 on success, 1 if rte_hash_create() failed (the boolean value
 * of the NULL comparison).
 *
 * NOTE(review): elided view — the function's opening brace and the hash
 * params struct terminator are not visible here.
 */
225 int enic_clsf_init(struct enic *enic)
227 	struct rte_hash_parameters hash_params = {
228 		.name = "enicpmd_clsf_hash",
229 		.entries = ENICPMD_CLSF_HASH_ENTRIES,
230 		.bucket_entries = ENICPMD_CLSF_BUCKET_ENTRIES,
231 		.key_len = sizeof(struct rte_fdir_filter),
232 		.hash_func = DEFAULT_HASH_FUNC,
233 		.hash_func_init_val = 0,
234 		.socket_id = SOCKET_0,
237 	enic->fdir.hash = rte_hash_create(&hash_params);
238 	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
239 	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
240 	return (NULL == enic->fdir.hash);