/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */

#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>

#include "enic_compat.h"
#include "enic.h"
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_enet.h"
#include "vnic_dev.h"
#include "vnic_intr.h"

#ifdef RTE_ARCH_X86
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif

#define ENICPMD_CLSF_HASH_ENTRIES	ENICPMD_FDIR_MAX

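/* Flow Director bookkeeping: a hash table maps each rte_eth_fdir_filter key
 * to the slot of its enic_fdir_node, which remembers the hardware filter ID
 * and target RQ. CRC hashing is used on x86, jhash elsewhere.
 */
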
static void copy_fltr_v1(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);
static void copy_fltr_v2(struct filter_v2 *fltr,
		const struct rte_eth_fdir_input *input,
		const struct rte_eth_fdir_masks *masks);

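/* Two copy strategies: copy_fltr_v1 builds a fixed IPv4 5-tuple filter for
 * VICs without advanced filter support; copy_fltr_v2 builds a generic
 * match/mask filter and is selected when enic->adv_filters is set.
 */
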
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
	*stats = enic->fdir.stats;
}

void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
{
	info->mode = (enum rte_fdir_mode)enic->fdir.modes;
	info->flow_types_mask[0] = enic->fdir.types_mask;
}

void enic_fdir_info(struct enic *enic)
{
	enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
	if (enic->adv_filters) {
		enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
					 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
		enic->fdir.copy_fltr_fn = copy_fltr_v2;
	} else {
		enic->fdir.copy_fltr_fn = copy_fltr_v1;
	}
}

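/* Record a match at one header layer of a generic filter: set the layer's
 * flag and copy the caller's mask/value pair into that layer's buffers.
 * Note that val_flags is ORed with the accumulated mask_flags rather than
 * with just the new flag; both fields track the same flag set here, so the
 * result is the same either way.
 */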
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
	       enum filter_generic_1_layer layer, void *mask, void *val,
	       unsigned int len)
{
	gp->mask_flags |= flag;
	gp->val_flags |= gp->mask_flags;
	memcpy(gp->layer[layer].mask, mask, len);
	memcpy(gp->layer[layer].val, val, len);
}

/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
 * without advanced filter support).
 */
static void
copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     __rte_unused const struct rte_eth_fdir_masks *masks)
{
	fltr->type = FILTER_IPV4_5TUPLE;
	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.src_ip);
	fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
		input->flow.ip4_flow.dst_ip);
	fltr->u.ipv4.src_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.src_port);
	fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
		input->flow.udp4_flow.dst_port);

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
		fltr->u.ipv4.protocol = PROTO_TCP;
	else
		fltr->u.ipv4.protocol = PROTO_UDP;

	fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
}

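/* The 5-tuple filter matches all five fields exactly; the masks argument is
 * deliberately unused. Reading the ports via udp4_flow is safe for TCP too,
 * since udp4_flow and tcp4_flow lay out src_port/dst_port identically.
 */
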
/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 */
static void
copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
	     const struct rte_eth_fdir_masks *masks)
{
	struct filter_generic_1 *gp = &fltr->u.generic_1;

	fltr->type = FILTER_DPDK_1;
	memset(gp, 0, sizeof(*gp));

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp4_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp4_flow.src_port;
		}
		if (input->flow.udp4_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp4_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp4_flow.src_port;
		}
		if (input->flow.tcp4_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp4_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp4_flow.src_port;
		}
		if (input->flow.sctp4_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
		}
		if (input->flow.sctp4_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
		}

		/*
		 * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
		 * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
		 * manually set proto_id=sctp below.
		 */
		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
		struct rte_ipv4_hdr ip4_mask, ip4_val;
		memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
		memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));

		if (input->flow.ip4_flow.tos) {
			ip4_mask.type_of_service = masks->ipv4_mask.tos;
			ip4_val.type_of_service = input->flow.ip4_flow.tos;
		}
		if (input->flow.ip4_flow.ttl) {
			ip4_mask.time_to_live = masks->ipv4_mask.ttl;
			ip4_val.time_to_live = input->flow.ip4_flow.ttl;
		}
		if (input->flow.ip4_flow.proto) {
			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
			/* Explicitly match the SCTP protocol number */
			ip4_mask.next_proto_id = 0xff;
			ip4_val.next_proto_id = IPPROTO_SCTP;
		}
		if (input->flow.ip4_flow.src_ip) {
			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
			ip4_val.src_addr = input->flow.ip4_flow.src_ip;
		}
		if (input->flow.ip4_flow.dst_ip) {
			ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
			ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
			       &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
	}

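	/* The IPv6 flow types mirror the IPv4 logic above: match the L4
	 * header first, then the IPv6 header itself at L3.
	 */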
	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
		struct rte_udp_hdr udp_mask, udp_val;
		memset(&udp_mask, 0, sizeof(udp_mask));
		memset(&udp_val, 0, sizeof(udp_val));

		if (input->flow.udp6_flow.src_port) {
			udp_mask.src_port = masks->src_port_mask;
			udp_val.src_port = input->flow.udp6_flow.src_port;
		}
		if (input->flow.udp6_flow.dst_port) {
			udp_mask.dst_port = masks->dst_port_mask;
			udp_val.dst_port = input->flow.udp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
		struct rte_tcp_hdr tcp_mask, tcp_val;
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		memset(&tcp_val, 0, sizeof(tcp_val));

		if (input->flow.tcp6_flow.src_port) {
			tcp_mask.src_port = masks->src_port_mask;
			tcp_val.src_port = input->flow.tcp6_flow.src_port;
		}
		if (input->flow.tcp6_flow.dst_port) {
			tcp_mask.dst_port = masks->dst_port_mask;
			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
		struct rte_sctp_hdr sctp_mask, sctp_val;
		memset(&sctp_mask, 0, sizeof(sctp_mask));
		memset(&sctp_val, 0, sizeof(sctp_val));

		if (input->flow.sctp6_flow.src_port) {
			sctp_mask.src_port = masks->src_port_mask;
			sctp_val.src_port = input->flow.sctp6_flow.src_port;
		}
		if (input->flow.sctp6_flow.dst_port) {
			sctp_mask.dst_port = masks->dst_port_mask;
			sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
		}
		if (input->flow.sctp6_flow.verify_tag) {
			sctp_mask.tag = 0xffffffff;
			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
		}

		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
			       &sctp_val, sizeof(struct rte_sctp_hdr));
	}

	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
		struct rte_ipv6_hdr ipv6_mask, ipv6_val;
		memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
		memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));

		if (input->flow.ipv6_flow.proto) {
			ipv6_mask.proto = masks->ipv6_mask.proto;
			ipv6_val.proto = input->flow.ipv6_flow.proto;
		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
			/* See comments for IPv4 SCTP above. */
			ipv6_mask.proto = 0xff;
			ipv6_val.proto = IPPROTO_SCTP;
		}
		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
		       sizeof(ipv6_mask.src_addr));
		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
		       sizeof(ipv6_val.src_addr));
		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
		       sizeof(ipv6_mask.dst_addr));
		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
		       sizeof(ipv6_val.dst_addr));
		if (input->flow.ipv6_flow.tc) {
			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
		}
		if (input->flow.ipv6_flow.hop_limits) {
			ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
			ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
		}

		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
			       &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
	}
}

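/* Delete a Flow Director filter. rte_hash_del_key() returns the slot the
 * key occupied (or a negative errno); that slot doubles as the index into
 * fdir.nodes[] for the node holding the hardware filter ID.
 */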
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	int32_t pos;
	struct enic_fdir_node *key;
	/* See if the key is in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
	case -ENOENT:
		enic->fdir.stats.f_remove++;
		return -EINVAL;
	default:
		/* The entry is present in the table */
		key = enic->fdir.nodes[pos];

		/* Delete the filter */
		vnic_dev_classifier(enic->vdev, CLSF_DEL,
			&key->fltr_id, NULL, NULL);
		rte_free(key);
		enic->fdir.nodes[pos] = NULL;
		enic->fdir.stats.free++;
		enic->fdir.stats.remove++;
		break;
	}
	return 0;
}

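/* Add (or update) a Flow Director filter. Requests are rejected when they
 * use features this path does not support: VLAN match, flexible payload
 * bytes, unsupported flow types, or a drop action. An existing entry for the
 * same key is replaced; when a classifier slot is free, the new hardware
 * filter is installed before the old one is deleted so matching traffic is
 * not diverted to the default queue in between.
 */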
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
	struct enic_fdir_node *key;
	struct filter_v2 fltr;
	int32_t pos;
	uint8_t do_free = 0;
	uint16_t old_fltr_id = 0;
	uint32_t flowtype_supported;
	uint16_t flex_bytes;
	uint16_t queue;
	struct filter_action_v2 action;

	memset(&fltr, 0, sizeof(fltr));
	memset(&action, 0, sizeof(action));
	flowtype_supported = enic->fdir.types_mask
			     & (1 << params->input.flow_type);

	flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
		(params->input.flow_ext.flexbytes[0] & 0xFF));

	if (!enic->fdir.hash ||
	    (params->input.flow_ext.vlan_tci & 0xFFF) ||
	    !flowtype_supported || flex_bytes ||
	    params->action.behavior /* drop */) {
		enic->fdir.stats.f_add++;
		return -ENOTSUP;
	}

	/* Get the enicpmd RQ from the DPDK Rx queue */
	queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

	if (!enic->rq[queue].in_use)
		return -EINVAL;

	/* See if the key is already there in the table */
	pos = rte_hash_del_key(enic->fdir.hash, params);
	switch (pos) {
	case -EINVAL:
		enic->fdir.stats.f_add++;
		return -EINVAL;
	case -ENOENT:
		/* Add a new classifier entry */
		if (!enic->fdir.stats.free) {
			enic->fdir.stats.f_add++;
			return -ENOSPC;
		}
		key = rte_zmalloc("enic_fdir_node",
				  sizeof(struct enic_fdir_node), 0);
		if (!key) {
			enic->fdir.stats.f_add++;
			return -ENOMEM;
		}
		break;
	default:
		/* The entry is already present in the table.
		 * Check if there is a change in queue
		 */
		key = enic->fdir.nodes[pos];
		enic->fdir.nodes[pos] = NULL;
		if (unlikely(key->rq_index == queue)) {
			/* Nothing to be done */
			enic->fdir.stats.f_add++;
			pos = rte_hash_add_key(enic->fdir.hash, params);
			if (pos < 0) {
				dev_err(enic, "Add hash key failed\n");
				return pos;
			}
			enic->fdir.nodes[pos] = key;
			dev_warning(enic,
				"FDIR rule is already present\n");
			return 0;
		}

		if (likely(enic->fdir.stats.free)) {
			/* Add the filter and then delete the old one.
			 * This is to avoid packets from going into the
			 * default queue during the window between
			 * delete and add.
			 */
			do_free = 1;
			old_fltr_id = key->fltr_id;
		} else {
			/* No free slots in the classifier.
			 * Delete the filter and add the modified one later.
			 */
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			enic->fdir.stats.free++;
		}
		break;
	}

	key->filter = *params;
	key->rq_index = queue;

	enic->fdir.copy_fltr_fn(&fltr, &params->input,
				&enic->rte_dev->data->dev_conf.fdir_conf.mask);
	action.type = FILTER_ACTION_RQ_STEERING;
	action.rq_idx = queue;

	if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
	    &action)) {
		key->fltr_id = queue;
	} else {
		dev_err(enic, "Add classifier entry failed\n");
		enic->fdir.stats.f_add++;
		rte_free(key);
		return -1;
	}

	if (do_free)
		vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
				    NULL);
	else {
		enic->fdir.stats.free--;
		enic->fdir.stats.add++;
	}

	pos = rte_hash_add_key(enic->fdir.hash, params);
	if (pos < 0) {
		enic->fdir.stats.f_add++;
		dev_err(enic, "Add hash key failed\n");
		return pos;
	}

	enic->fdir.nodes[pos] = key;
	return 0;
}

void enic_clsf_destroy(struct enic *enic)
{
	uint32_t index;
	struct enic_fdir_node *key;
	/* delete classifier entries */
	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
		key = enic->fdir.nodes[index];
		if (key) {
			vnic_dev_classifier(enic->vdev, CLSF_DEL,
				&key->fltr_id, NULL, NULL);
			rte_free(key);
			enic->fdir.nodes[index] = NULL;
		}
	}

	if (enic->fdir.hash) {
		rte_hash_free(enic->fdir.hash);
		enic->fdir.hash = NULL;
	}
}

int enic_clsf_init(struct enic *enic)
{
	char clsf_name[RTE_HASH_NAMESIZE];
	struct rte_hash_parameters hash_params = {
		.name = clsf_name,
		.entries = ENICPMD_CLSF_HASH_ENTRIES,
		.key_len = sizeof(struct rte_eth_fdir_filter),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = SOCKET_ID_ANY,
	};
	snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
	enic->fdir.hash = rte_hash_create(&hash_params);
	memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
	enic->fdir.stats.free = ENICPMD_FDIR_MAX;
	return NULL == enic->fdir.hash;
}

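/* Typical lifecycle, as a sketch (the exact call sites live elsewhere in
 * the PMD):
 *
 *	enic_clsf_init(enic);			// at probe; nonzero on failure
 *	...
 *	enic_fdir_add_fltr(enic, &params);	// on RTE_ETH_FILTER_ADD
 *	enic_fdir_del_fltr(enic, &params);	// on RTE_ETH_FILTER_DELETE
 *	...
 *	enic_clsf_destroy(enic);		// at device close
 */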