/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_nic.h"
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif
#define ENICPMD_CLSF_HASH_ENTRIES	ENICPMD_FDIR_MAX
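
/* Flow-director filters are tracked in an rte_hash table keyed by the
 * entire struct rte_eth_fdir_filter, sized to match the number of
 * hardware classifier slots.
 */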
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}
void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}
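
/* Advertise the supported FDIR mode and flow types. Perfect-match filters
 * on IPv4 UDP/TCP are always available; the remaining IPv4 and IPv6 flow
 * types require the adapter's advanced filter support (enic->adv_filters).
 */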
void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}
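
/* Fold one protocol layer into a generic filter: OR the match flag into
 * the filter's flag fields and copy the (mask, value) pair into the given
 * layer slot, so successive calls build up a multi-layer match.
 */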
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}
/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
void
copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     __rte_unused struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}
/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
void
copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;
	int i;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/* v4 proto should be 132, override ip4_flow.proto */
		input->flow.ip4_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}
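
	/* Populate the L3 layer (IPv4 header) shared by all IPv4 flow
	 * types; only fields that are set in the input are matched.
	 */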
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
	}
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		/* v6 proto should be 132, override ipv6_flow.proto */
		input->flow.ipv6_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}
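
	/* Populate the L3 layer (IPv6 header) shared by all IPv6 flow
	 * types.
	 */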
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		}
		for (i = 0; i < 4; i++) {
			*(uint32_t *)&ipv6_mask.src_addr[i * 4] =
				masks->ipv6_mask.src_ip[i];
			*(uint32_t *)&ipv6_val.src_addr[i * 4] =
				input->flow.ipv6_flow.src_ip[i];
		}
		for (i = 0; i < 4; i++) {
			*(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
				masks->ipv6_mask.dst_ip[i];
			*(uint32_t *)&ipv6_val.dst_addr[i * 4] =
				input->flow.ipv6_flow.dst_ip[i];
		}
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
	}
}
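
/* Remove a flow-director filter: drop the key from the hash table, delete
 * the hardware classifier entry, and free the node.
 */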
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];
		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
			&key->fltr_id, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}
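
/* Add or update a flow-director filter. Configurations the VIC classifier
 * cannot express (VLAN match, flex bytes, drop action, unsupported flow
 * types) are rejected up front. When an existing rule changes queues, the
 * new classifier entry is installed before the old one is deleted, so
 * matching packets do not fall through to the default queue in between.
 */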
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;

	memset(&fltr, 0, sizeof(fltr));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
	    (params->input.flow_ext.vlan_tci & 0xFFF) ||
	    !flowtype_supported || flex_bytes ||
	    params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}
	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;
	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}
		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add.
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later.
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			enic->fdir.stats.free++;
		}
		break;
	}
	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}
	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}
	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}
	enic->fdir.nodes[pos] = key;
	return 0;
}
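
/* Tear down the classifier: delete every installed hardware entry, free
 * the filter nodes, and release the hash table.
 */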
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}
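
/* Create the per-port filter hash table, named after the device's PCI
 * address (bdf_name) so multiple ports can coexist. Returns nonzero if
 * the hash table could not be created.
 */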
int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};

	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}
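
/* Usage sketch: these entry points are expected to be driven from the
 * ethdev filter-control path (RTE_ETH_FILTER_FDIR); the call order is
 * roughly:
 *
 *	enic_clsf_init(enic);			// device init
 *	enic_fdir_add_fltr(enic, &fltr);	// filter add/update
 *	enic_fdir_del_fltr(enic, &fltr);	// filter delete
 *	enic_clsf_destroy(enic);		// device close
 */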