/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_eth_ctrl.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_nic.h"

/* Pick the hash function for the flow director lookup table: hardware
 * CRC32 on x86, Jenkins hash elsewhere.
 */
#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES	ENICPMD_FDIR_MAX

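/* Flow director (classifier) support for the enic PMD. Rules accepted
 * through the flow director API are translated into VIC hardware filters
 * (IPv4 5-tuple filters on older VICs, generic match filters when the
 * adapter advertises advanced filter support) and tracked in an rte_hash
 * table keyed by the full struct rte_eth_fdir_filter.
 */
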
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}

void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}

void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}

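/* Merge one protocol layer into a generic VIC filter: record the layer's
 * match flag in the filter's flag bitmaps and copy the caller's mask and
 * value into the per-layer match arrays.
 */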
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}

/* Copy a Flow Director filter to a VIC IPv4 5-tuple filter (for Cisco VICs
 * without advanced filter support).
 */
void
copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     __rte_unused struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

/* Copy a Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
void
copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
	     struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/* The generic filter has no SCTP match flag, so match IP
		 * protocol 132 (SCTP) in the L3 layer below instead;
		 * override ip4_flow.proto.
		 */
		input->flow.ip4_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		/* The generic filter has no SCTP match flag, so match IP
		 * protocol 132 (SCTP) in the L3 layer below instead;
		 * override ipv6_flow.proto.
		 */
		input->flow.ipv6_flow.proto = 132;

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		}
		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
		       sizeof(ipv6_mask.src_addr));
		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
		       sizeof(ipv6_val.src_addr));
		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
		       sizeof(ipv6_mask.dst_addr));
		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
		       sizeof(ipv6_val.dst_addr));
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
	}
}

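/* Delete a flow director rule: remove the key from the lookup table,
 * delete the corresponding hardware classifier entry, and free the
 * tracking node.
 */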
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];

		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
			&key->fltr_id, NULL, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}

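/* Add or update a flow director rule. Unsupported matches (VLAN, flex
 * bytes, drop actions, flow types the adapter cannot filter) are rejected
 * up front. When an existing rule changes queues, the new hardware filter
 * is installed before the old one is deleted whenever a free slot allows,
 * so matching traffic is steered throughout the swap.
 */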
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	u8 do_free = 0;
	u16 old_fltr_id = 0;
	u32 flowtype_supported;
	u16 flex_bytes;
	u16 queue;
	struct filter_action_v2 action;

	memset(&fltr, 0, sizeof(fltr));
	memset(&action, 0, sizeof(action));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
	    (params->input.flow_ext.vlan_tci & 0xFFF) ||
	    !flowtype_supported || flex_bytes ||
	    params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue.
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add.
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later.
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			enic->fdir.stats.free++;
		}

		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	action.type = FILTER_ACTION_RQ_STEERING;
	action.rq_idx = queue;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
	    &action)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
				    NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}

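/* Illustrative sketch, not part of the driver: how a caller might build a
 * perfect-match IPv4/UDP rule and install it with enic_fdir_add_fltr().
 * The addresses, ports, and Rx queue index below are hypothetical.
 */
static __rte_unused int
enic_clsf_example_add_udp4(struct enic *enic)
{
	struct rte_eth_fdir_filter f;

	memset(&f, 0, sizeof(f));
	f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
	/* Flow fields are big-endian, per the rte_eth_ctrl.h convention */
	f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
	f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
	f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000);
	f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(6000);
	f.action.behavior = RTE_ETH_FDIR_ACCEPT; /* drop is rejected above */
	f.action.rx_queue = 1; /* hypothetical Rx queue */
	return enic_fdir_add_fltr(enic, &f);
}
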
void enic_clsf_destroy(struct enic *enic)
{
	u32 index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}
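
/* Illustrative sketch (hypothetical caller, not part of the driver):
 * enic_clsf_init() pairs with enic_clsf_destroy() and returns nonzero
 * when the lookup hash table cannot be created.
 */
static __rte_unused int
enic_clsf_example_lifecycle(struct enic *enic)
{
	if (enic_clsf_init(enic))
		return -ENOMEM;	/* hash table creation failed */
	/* ... install/remove rules via enic_fdir_add_fltr/del_fltr ... */
	enic_clsf_destroy(enic);
	return 0;
}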