net/enic: remove useless include
[dpdk.git] / drivers / net / enic / enic_clsf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
3  * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
4  */
5
6 #include <rte_ethdev_driver.h>
7 #include <rte_malloc.h>
8 #include <rte_hash.h>
9 #include <rte_byteorder.h>
10 #include <rte_ip.h>
11 #include <rte_tcp.h>
12 #include <rte_udp.h>
13 #include <rte_sctp.h>
14 #include <rte_eth_ctrl.h>
15
16 #include "enic_compat.h"
17 #include "enic.h"
18 #include "wq_enet_desc.h"
19 #include "rq_enet_desc.h"
20 #include "cq_enet_desc.h"
21 #include "vnic_enet.h"
22 #include "vnic_dev.h"
23 #include "vnic_wq.h"
24 #include "vnic_rq.h"
25 #include "vnic_cq.h"
26 #include "vnic_intr.h"
27 #include "vnic_nic.h"
28
29 #ifdef RTE_ARCH_X86
30 #include <rte_hash_crc.h>
31 #define DEFAULT_HASH_FUNC       rte_hash_crc
32 #else
33 #include <rte_jhash.h>
34 #define DEFAULT_HASH_FUNC       rte_jhash
35 #endif
36
37 #define ENICPMD_CLSF_HASH_ENTRIES       ENICPMD_FDIR_MAX
38
39 void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
40 {
41         *stats = enic->fdir.stats;
42 }
43
44 void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
45 {
46         info->mode = (enum rte_fdir_mode)enic->fdir.modes;
47         info->flow_types_mask[0] = enic->fdir.types_mask;
48 }
49
50 void enic_fdir_info(struct enic *enic)
51 {
52         enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
53         enic->fdir.types_mask  = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
54                                  1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
55         if (enic->adv_filters) {
56                 enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
57                                          1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
58                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
59                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
60                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
61                                          1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
62                 enic->fdir.copy_fltr_fn = copy_fltr_v2;
63         } else {
64                 enic->fdir.copy_fltr_fn = copy_fltr_v1;
65         }
66 }
67
/* Helper for copy_fltr_v2(): install one (mask, value) pair into the
 * generic filter at protocol layer 'layer' and record 'flag' in the
 * filter's flag fields ('flag' may be 0 when no dedicated flag exists,
 * as for SCTP).
 *
 * Copies 'len' bytes from 'mask' and 'val'; the caller must ensure they
 * fit the layer buffers.
 */
static void
enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
               enum filter_generic_1_layer layer, void *mask, void *val,
               unsigned int len)
{
        gp->mask_flags |= flag;
        /* ORs the accumulated mask_flags rather than just 'flag'; within
         * this file both fields start zeroed and are only updated here, so
         * val_flags tracks mask_flags and this is equivalent to |= flag.
         */
        gp->val_flags |= gp->mask_flags;
        memcpy(gp->layer[layer].mask, mask, len);
        memcpy(gp->layer[layer].val, val, len);
}
78
79 /* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
80  * without advanced filter support.
81  */
82 void
83 copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
84              __rte_unused struct rte_eth_fdir_masks *masks)
85 {
86         fltr->type = FILTER_IPV4_5TUPLE;
87         fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
88                 input->flow.ip4_flow.src_ip);
89         fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
90                 input->flow.ip4_flow.dst_ip);
91         fltr->u.ipv4.src_port = rte_be_to_cpu_16(
92                 input->flow.udp4_flow.src_port);
93         fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
94                 input->flow.udp4_flow.dst_port);
95
96         if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
97                 fltr->u.ipv4.protocol = PROTO_TCP;
98         else
99                 fltr->u.ipv4.protocol = PROTO_UDP;
100
101         fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
102 }
103
/* Copy Flow Director filter to a VIC generic filter (requires advanced
 * filter support).
 *
 * Builds a FILTER_DPDK_1 filter: each protocol header implied by the flow
 * type gets a (mask, value) pair in the generic filter's layer array.
 * Only fields that are non-zero in 'input' are matched; their mask bytes
 * come from 'masks'.
 *
 * NOTE(review): for SCTP flow types this function overwrites
 * input->flow.ip4_flow.proto / input->flow.ipv6_flow.proto, i.e. it
 * modifies the caller's 'input' structure.
 */
void
copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
             struct rte_eth_fdir_masks *masks)
{
        struct filter_generic_1 *gp = &fltr->u.generic_1;

        fltr->type = FILTER_DPDK_1;
        memset(gp, 0, sizeof(*gp));

        /* L4 (mask, value) pair for IPv4 flow types */
        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
                struct udp_hdr udp_mask, udp_val;
                memset(&udp_mask, 0, sizeof(udp_mask));
                memset(&udp_val, 0, sizeof(udp_val));

                if (input->flow.udp4_flow.src_port) {
                        udp_mask.src_port = masks->src_port_mask;
                        udp_val.src_port = input->flow.udp4_flow.src_port;
                }
                if (input->flow.udp4_flow.dst_port) {
                        udp_mask.dst_port = masks->dst_port_mask;
                        udp_val.dst_port = input->flow.udp4_flow.dst_port;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
                               &udp_mask, &udp_val, sizeof(struct udp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
                struct tcp_hdr tcp_mask, tcp_val;
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                memset(&tcp_val, 0, sizeof(tcp_val));

                if (input->flow.tcp4_flow.src_port) {
                        tcp_mask.src_port = masks->src_port_mask;
                        tcp_val.src_port = input->flow.tcp4_flow.src_port;
                }
                if (input->flow.tcp4_flow.dst_port) {
                        tcp_mask.dst_port = masks->dst_port_mask;
                        tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
                               &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
                struct sctp_hdr sctp_mask, sctp_val;
                memset(&sctp_mask, 0, sizeof(sctp_mask));
                memset(&sctp_val, 0, sizeof(sctp_val));

                if (input->flow.sctp4_flow.src_port) {
                        sctp_mask.src_port = masks->src_port_mask;
                        sctp_val.src_port = input->flow.sctp4_flow.src_port;
                }
                if (input->flow.sctp4_flow.dst_port) {
                        sctp_mask.dst_port = masks->dst_port_mask;
                        sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
                }
                if (input->flow.sctp4_flow.verify_tag) {
                        sctp_mask.tag = 0xffffffff;
                        sctp_val.tag = input->flow.sctp4_flow.verify_tag;
                }

                /* SCTP is IP protocol 132; force it into the IPv4 header
                 * match below (overrides caller-supplied ip4_flow.proto)
                 */
                input->flow.ip4_flow.proto = 132;

                /* No SCTP-specific flag exists, so pass flag 0 and match
                 * the header bytes only
                 */
                enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
                               &sctp_val, sizeof(struct sctp_hdr));
        }

        /* L3 (mask, value) pair for all IPv4 flow types */
        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
                struct ipv4_hdr ip4_mask, ip4_val;
                memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
                memset(&ip4_val, 0, sizeof(struct ipv4_hdr));

                if (input->flow.ip4_flow.tos) {
                        ip4_mask.type_of_service = masks->ipv4_mask.tos;
                        ip4_val.type_of_service = input->flow.ip4_flow.tos;
                }
                if (input->flow.ip4_flow.ttl) {
                        ip4_mask.time_to_live = masks->ipv4_mask.ttl;
                        ip4_val.time_to_live = input->flow.ip4_flow.ttl;
                }
                if (input->flow.ip4_flow.proto) {
                        ip4_mask.next_proto_id = masks->ipv4_mask.proto;
                        ip4_val.next_proto_id = input->flow.ip4_flow.proto;
                }
                if (input->flow.ip4_flow.src_ip) {
                        ip4_mask.src_addr =  masks->ipv4_mask.src_ip;
                        ip4_val.src_addr = input->flow.ip4_flow.src_ip;
                }
                if (input->flow.ip4_flow.dst_ip) {
                        ip4_mask.dst_addr =  masks->ipv4_mask.dst_ip;
                        ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
                               &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
        }

        /* L4 (mask, value) pair for IPv6 flow types */
        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
                struct udp_hdr udp_mask, udp_val;
                memset(&udp_mask, 0, sizeof(udp_mask));
                memset(&udp_val, 0, sizeof(udp_val));

                if (input->flow.udp6_flow.src_port) {
                        udp_mask.src_port = masks->src_port_mask;
                        udp_val.src_port = input->flow.udp6_flow.src_port;
                }
                if (input->flow.udp6_flow.dst_port) {
                        udp_mask.dst_port = masks->dst_port_mask;
                        udp_val.dst_port = input->flow.udp6_flow.dst_port;
                }
                enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
                               &udp_mask, &udp_val, sizeof(struct udp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
                struct tcp_hdr tcp_mask, tcp_val;
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                memset(&tcp_val, 0, sizeof(tcp_val));

                if (input->flow.tcp6_flow.src_port) {
                        tcp_mask.src_port = masks->src_port_mask;
                        tcp_val.src_port = input->flow.tcp6_flow.src_port;
                }
                if (input->flow.tcp6_flow.dst_port) {
                        tcp_mask.dst_port = masks->dst_port_mask;
                        tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
                }
                enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
                               &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
        } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
                struct sctp_hdr sctp_mask, sctp_val;
                memset(&sctp_mask, 0, sizeof(sctp_mask));
                memset(&sctp_val, 0, sizeof(sctp_val));

                if (input->flow.sctp6_flow.src_port) {
                        sctp_mask.src_port = masks->src_port_mask;
                        sctp_val.src_port = input->flow.sctp6_flow.src_port;
                }
                if (input->flow.sctp6_flow.dst_port) {
                        sctp_mask.dst_port = masks->dst_port_mask;
                        sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
                }
                if (input->flow.sctp6_flow.verify_tag) {
                        sctp_mask.tag = 0xffffffff;
                        sctp_val.tag = input->flow.sctp6_flow.verify_tag;
                }

                /* SCTP is IP protocol 132; force it into the IPv6 header
                 * match below (overrides caller-supplied ipv6_flow.proto)
                 */
                input->flow.ipv6_flow.proto = 132;

                enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
                               &sctp_val, sizeof(struct sctp_hdr));
        }

        /* L3 (mask, value) pair for all IPv6 flow types */
        if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
            input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
                struct ipv6_hdr ipv6_mask, ipv6_val;
                memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
                memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));

                if (input->flow.ipv6_flow.proto) {
                        ipv6_mask.proto = masks->ipv6_mask.proto;
                        ipv6_val.proto = input->flow.ipv6_flow.proto;
                }
                /* Addresses are copied unconditionally; an all-zero mask
                 * leaves them unmatched
                 */
                memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
                       sizeof(ipv6_mask.src_addr));
                memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
                       sizeof(ipv6_val.src_addr));
                memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
                       sizeof(ipv6_mask.dst_addr));
                memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
                       sizeof(ipv6_val.dst_addr));
                if (input->flow.ipv6_flow.tc) {
                        /* Traffic class sits at bits 27:20 of vtc_flow;
                         * shift by 12 positions it there -- TODO confirm
                         * byte order handling matches the VIC's expectation
                         */
                        ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
                        ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
                }
                if (input->flow.ipv6_flow.hop_limits) {
                        ipv6_mask.hop_limits = masks->ipv6_mask.hop_limits;
                        ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
                }

                enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
                               &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
        }
}
294
295 int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
296 {
297         int32_t pos;
298         struct enic_fdir_node *key;
299         /* See if the key is in the table */
300         pos = rte_hash_del_key(enic->fdir.hash, params);
301         switch (pos) {
302         case -EINVAL:
303         case -ENOENT:
304                 enic->fdir.stats.f_remove++;
305                 return -EINVAL;
306         default:
307                 /* The entry is present in the table */
308                 key = enic->fdir.nodes[pos];
309
310                 /* Delete the filter */
311                 vnic_dev_classifier(enic->vdev, CLSF_DEL,
312                         &key->fltr_id, NULL, NULL);
313                 rte_free(key);
314                 enic->fdir.nodes[pos] = NULL;
315                 enic->fdir.stats.free++;
316                 enic->fdir.stats.remove++;
317                 break;
318         }
319         return 0;
320 }
321
/* Add or update a flow director classifier entry.
 *
 * Returns 0 on success; -ENOTSUP for filter options the VIC cannot
 * express, -EINVAL/-ENOSPC/-ENOMEM on validation or resource failures,
 * -1 when programming the hardware classifier fails, or a negative
 * rte_hash error code.
 */
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
        struct enic_fdir_node *key;
        struct filter_v2 fltr;
        int32_t pos;
        u8 do_free = 0;
        u16 old_fltr_id = 0;
        u32 flowtype_supported;
        u16 flex_bytes;
        u16 queue;
        struct filter_action_v2 action;

        memset(&fltr, 0, sizeof(fltr));
        memset(&action, 0, sizeof(action));
        flowtype_supported = enic->fdir.types_mask
                             & (1 << params->input.flow_type);

        flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
                (params->input.flow_ext.flexbytes[0] & 0xFF));

        /* Reject anything the classifier cannot express: missing hash
         * table, VLAN match, unsupported flow type, flexbytes match, or
         * the drop action
         */
        if (!enic->fdir.hash ||
                (params->input.flow_ext.vlan_tci & 0xFFF) ||
                !flowtype_supported || flex_bytes ||
                params->action.behavior /* drop */) {
                enic->fdir.stats.f_add++;
                return -ENOTSUP;
        }

        /* Get the enicpmd RQ from the DPDK Rx queue */
        queue = enic_rte_rq_idx_to_sop_idx(params->action.rx_queue);

        if (!enic->rq[queue].in_use)
                return -EINVAL;

        /* See if the key is already there in the table; note this deletes
         * it from the hash on a hit -- it is re-added further down
         */
        pos = rte_hash_del_key(enic->fdir.hash, params);
        switch (pos) {
        case -EINVAL:
                enic->fdir.stats.f_add++;
                return -EINVAL;
        case -ENOENT:
                /* Add a new classifier entry */
                if (!enic->fdir.stats.free) {
                        enic->fdir.stats.f_add++;
                        return -ENOSPC;
                }
                key = rte_zmalloc("enic_fdir_node",
                                  sizeof(struct enic_fdir_node), 0);
                if (!key) {
                        enic->fdir.stats.f_add++;
                        return -ENOMEM;
                }
                break;
        default:
                /* The entry is already present in the table.
                 * Check if there is a change in queue
                 */
                key = enic->fdir.nodes[pos];
                enic->fdir.nodes[pos] = NULL;
                if (unlikely(key->rq_index == queue)) {
                        /* Nothing to be done: same queue, so just restore
                         * the hash entry deleted above
                         */
                        enic->fdir.stats.f_add++;
                        pos = rte_hash_add_key(enic->fdir.hash, params);
                        if (pos < 0) {
                                dev_err(enic, "Add hash key failed\n");
                                return pos;
                        }
                        enic->fdir.nodes[pos] = key;
                        dev_warning(enic,
                                "FDIR rule is already present\n");
                        return 0;
                }

                if (likely(enic->fdir.stats.free)) {
                        /* Add the filter and then delete the old one.
                         * This is to avoid packets from going into the
                         * default queue during the window between
                         * delete and add
                         */
                        do_free = 1;
                        old_fltr_id = key->fltr_id;
                } else {
                        /* No free slots in the classifier.
                         * Delete the filter and add the modified one later
                         */
                        vnic_dev_classifier(enic->vdev, CLSF_DEL,
                                &key->fltr_id, NULL, NULL);
                        enic->fdir.stats.free++;
                }

                break;
        }

        key->filter = *params;
        key->rq_index = queue;

        /* Translate the fdir input into a VIC filter; copy_fltr_fn is
         * copy_fltr_v1 or copy_fltr_v2 per adapter capability (see
         * enic_fdir_info)
         */
        enic->fdir.copy_fltr_fn(&fltr, &params->input,
                                &enic->rte_dev->data->dev_conf.fdir_conf.mask);
        action.type = FILTER_ACTION_RQ_STEERING;
        action.rq_idx = queue;

        /* 'queue' is in/out here: on success it appears to carry back the
         * new filter id (saved as key->fltr_id) -- TODO confirm against
         * vnic_dev_classifier()
         */
        if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr,
            &action)) {
                key->fltr_id = queue;
        } else {
                dev_err(enic, "Add classifier entry failed\n");
                enic->fdir.stats.f_add++;
                rte_free(key);
                return -1;
        }

        /* Remove the superseded filter only after the new one is in place
         * so traffic keeps being steered during the update
         */
        if (do_free)
                vnic_dev_classifier(enic->vdev, CLSF_DEL, &old_fltr_id, NULL,
                                    NULL);
        else{
                enic->fdir.stats.free--;
                enic->fdir.stats.add++;
        }

        pos = rte_hash_add_key(enic->fdir.hash, params);
        if (pos < 0) {
                enic->fdir.stats.f_add++;
                dev_err(enic, "Add hash key failed\n");
                return pos;
        }

        enic->fdir.nodes[pos] = key;
        return 0;
}
451
452 void enic_clsf_destroy(struct enic *enic)
453 {
454         u32 index;
455         struct enic_fdir_node *key;
456         /* delete classifier entries */
457         for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
458                 key = enic->fdir.nodes[index];
459                 if (key) {
460                         vnic_dev_classifier(enic->vdev, CLSF_DEL,
461                                 &key->fltr_id, NULL, NULL);
462                         rte_free(key);
463                         enic->fdir.nodes[index] = NULL;
464                 }
465         }
466
467         if (enic->fdir.hash) {
468                 rte_hash_free(enic->fdir.hash);
469                 enic->fdir.hash = NULL;
470         }
471 }
472
473 int enic_clsf_init(struct enic *enic)
474 {
475         char clsf_name[RTE_HASH_NAMESIZE];
476         struct rte_hash_parameters hash_params = {
477                 .name = clsf_name,
478                 .entries = ENICPMD_CLSF_HASH_ENTRIES,
479                 .key_len = sizeof(struct rte_eth_fdir_filter),
480                 .hash_func = DEFAULT_HASH_FUNC,
481                 .hash_func_init_val = 0,
482                 .socket_id = SOCKET_ID_ANY,
483         };
484         snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name);
485         enic->fdir.hash = rte_hash_create(&hash_params);
486         memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
487         enic->fdir.stats.free = ENICPMD_FDIR_MAX;
488         return NULL == enic->fdir.hash;
489 }