net/ice: enable switch filter
[dpdk.git] / drivers / net / ice / ice_switch_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
16 #include <rte_log.h>
17 #include <rte_malloc.h>
18 #include <rte_eth_ctrl.h>
19 #include <rte_tailq.h>
20 #include <rte_flow_driver.h>
21
22 #include "ice_logs.h"
23 #include "base/ice_type.h"
24 #include "ice_switch_filter.h"
25
26 static int
27 ice_parse_switch_filter(const struct rte_flow_item pattern[],
28                         const struct rte_flow_action actions[],
29                         struct rte_flow_error *error,
30                         struct ice_adv_lkup_elem *list,
31                         uint16_t *lkups_num,
32                         enum ice_sw_tunnel_type tun_type)
33 {
34         const struct rte_flow_item *item = pattern;
35         enum rte_flow_item_type item_type;
36         const struct rte_flow_item_eth *eth_spec, *eth_mask;
37         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
38         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
39         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
40         const struct rte_flow_item_udp *udp_spec, *udp_mask;
41         const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
42         const struct rte_flow_item_nvgre  *nvgre_spec, *nvgre_mask;
43         const struct rte_flow_item_vxlan  *vxlan_spec, *vxlan_mask;
44         uint16_t j, t = 0;
45         uint16_t tunnel_valid = 0;
46
47         for (item = pattern; item->type !=
48                         RTE_FLOW_ITEM_TYPE_END; item++) {
49                 item_type = item->type;
50
51                 switch (item_type) {
52                 case RTE_FLOW_ITEM_TYPE_ETH:
53                         eth_spec = item->spec;
54                         eth_mask = item->mask;
55                         if (eth_spec && eth_mask) {
56                                 list[t].type = (tun_type == ICE_NON_TUN) ?
57                                         ICE_MAC_OFOS : ICE_MAC_IL;
58                                 struct ice_ether_hdr *h;
59                                 struct ice_ether_hdr *m;
60                                 uint16_t i = 0;
61                                 h = &list[t].h_u.eth_hdr;
62                                 m = &list[t].m_u.eth_hdr;
63                                 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
64                                         if (eth_mask->src.addr_bytes[j] ==
65                                                                 UINT8_MAX) {
66                                                 h->src_addr[j] =
67                                                 eth_spec->src.addr_bytes[j];
68                                                 m->src_addr[j] =
69                                                 eth_mask->src.addr_bytes[j];
70                                                 i = 1;
71                                         }
72                                         if (eth_mask->dst.addr_bytes[j] ==
73                                                                 UINT8_MAX) {
74                                                 h->dst_addr[j] =
75                                                 eth_spec->dst.addr_bytes[j];
76                                                 m->dst_addr[j] =
77                                                 eth_mask->dst.addr_bytes[j];
78                                                 i = 1;
79                                         }
80                                 }
81                                 if (i)
82                                         t++;
83                                 if (eth_mask->type == UINT16_MAX) {
84                                         list[t].type = ICE_ETYPE_OL;
85                                         list[t].h_u.ethertype.ethtype_id =
86                                                 eth_spec->type;
87                                         list[t].m_u.ethertype.ethtype_id =
88                                                 UINT16_MAX;
89                                         t++;
90                                 }
91                         } else if (!eth_spec && !eth_mask) {
92                                 list[t].type = (tun_type == ICE_NON_TUN) ?
93                                         ICE_MAC_OFOS : ICE_MAC_IL;
94                         }
95                         break;
96
97                 case RTE_FLOW_ITEM_TYPE_IPV4:
98                         ipv4_spec = item->spec;
99                         ipv4_mask = item->mask;
100                         if (ipv4_spec && ipv4_mask) {
101                                 list[t].type = (tun_type == ICE_NON_TUN) ?
102                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
103                                 if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
104                                         list[t].h_u.ipv4_hdr.src_addr =
105                                                 ipv4_spec->hdr.src_addr;
106                                         list[t].m_u.ipv4_hdr.src_addr =
107                                                 UINT32_MAX;
108                                 }
109                                 if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
110                                         list[t].h_u.ipv4_hdr.dst_addr =
111                                                 ipv4_spec->hdr.dst_addr;
112                                         list[t].m_u.ipv4_hdr.dst_addr =
113                                                 UINT32_MAX;
114                                 }
115                                 if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
116                                         list[t].h_u.ipv4_hdr.time_to_live =
117                                                 ipv4_spec->hdr.time_to_live;
118                                         list[t].m_u.ipv4_hdr.time_to_live =
119                                                 UINT8_MAX;
120                                 }
121                                 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
122                                         list[t].h_u.ipv4_hdr.protocol =
123                                                 ipv4_spec->hdr.next_proto_id;
124                                         list[t].m_u.ipv4_hdr.protocol =
125                                                 UINT8_MAX;
126                                 }
127                                 if (ipv4_mask->hdr.type_of_service ==
128                                                 UINT8_MAX) {
129                                         list[t].h_u.ipv4_hdr.tos =
130                                                 ipv4_spec->hdr.type_of_service;
131                                         list[t].m_u.ipv4_hdr.tos = UINT8_MAX;
132                                 }
133                                 t++;
134                         } else if (!ipv4_spec && !ipv4_mask) {
135                                 list[t].type = (tun_type == ICE_NON_TUN) ?
136                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
137                         }
138                         break;
139
140                 case RTE_FLOW_ITEM_TYPE_IPV6:
141                         ipv6_spec = item->spec;
142                         ipv6_mask = item->mask;
143                         if (ipv6_spec && ipv6_mask) {
144                                 list[t].type = (tun_type == ICE_NON_TUN) ?
145                                         ICE_IPV6_OFOS : ICE_IPV6_IL;
146                                 struct ice_ipv6_hdr *f;
147                                 struct ice_ipv6_hdr *s;
148                                 f = &list[t].h_u.ipv6_hdr;
149                                 s = &list[t].m_u.ipv6_hdr;
150                                 for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) {
151                                         if (ipv6_mask->hdr.src_addr[j] ==
152                                                                 UINT8_MAX) {
153                                                 f->src_addr[j] =
154                                                 ipv6_spec->hdr.src_addr[j];
155                                                 s->src_addr[j] =
156                                                 ipv6_mask->hdr.src_addr[j];
157                                         }
158                                         if (ipv6_mask->hdr.dst_addr[j] ==
159                                                                 UINT8_MAX) {
160                                                 f->dst_addr[j] =
161                                                 ipv6_spec->hdr.dst_addr[j];
162                                                 s->dst_addr[j] =
163                                                 ipv6_mask->hdr.dst_addr[j];
164                                         }
165                                 }
166                                 if (ipv6_mask->hdr.proto == UINT8_MAX) {
167                                         f->next_hdr =
168                                                 ipv6_spec->hdr.proto;
169                                         s->next_hdr = UINT8_MAX;
170                                 }
171                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
172                                         f->hop_limit =
173                                                 ipv6_spec->hdr.hop_limits;
174                                         s->hop_limit = UINT8_MAX;
175                                 }
176                                 t++;
177                         } else if (!ipv6_spec && !ipv6_mask) {
178                                 list[t].type = (tun_type == ICE_NON_TUN) ?
179                                         ICE_IPV4_OFOS : ICE_IPV4_IL;
180                         }
181                         break;
182
183                 case RTE_FLOW_ITEM_TYPE_UDP:
184                         udp_spec = item->spec;
185                         udp_mask = item->mask;
186                         if (udp_spec && udp_mask) {
187                                 if (tun_type == ICE_SW_TUN_VXLAN &&
188                                                 tunnel_valid == 0)
189                                         list[t].type = ICE_UDP_OF;
190                                 else
191                                         list[t].type = ICE_UDP_ILOS;
192                                 if (udp_mask->hdr.src_port == UINT16_MAX) {
193                                         list[t].h_u.l4_hdr.src_port =
194                                                 udp_spec->hdr.src_port;
195                                         list[t].m_u.l4_hdr.src_port =
196                                                 udp_mask->hdr.src_port;
197                                 }
198                                 if (udp_mask->hdr.dst_port == UINT16_MAX) {
199                                         list[t].h_u.l4_hdr.dst_port =
200                                                 udp_spec->hdr.dst_port;
201                                         list[t].m_u.l4_hdr.dst_port =
202                                                 udp_mask->hdr.dst_port;
203                                 }
204                                 t++;
205                         } else if (!udp_spec && !udp_mask) {
206                                 list[t].type = ICE_UDP_ILOS;
207                         }
208                         break;
209
210                 case RTE_FLOW_ITEM_TYPE_TCP:
211                         tcp_spec = item->spec;
212                         tcp_mask = item->mask;
213                         if (tcp_spec && tcp_mask) {
214                                 list[t].type = ICE_TCP_IL;
215                                 if (tcp_mask->hdr.src_port == UINT16_MAX) {
216                                         list[t].h_u.l4_hdr.src_port =
217                                                 tcp_spec->hdr.src_port;
218                                         list[t].m_u.l4_hdr.src_port =
219                                                 tcp_mask->hdr.src_port;
220                                 }
221                                 if (tcp_mask->hdr.dst_port == UINT16_MAX) {
222                                         list[t].h_u.l4_hdr.dst_port =
223                                                 tcp_spec->hdr.dst_port;
224                                         list[t].m_u.l4_hdr.dst_port =
225                                                 tcp_mask->hdr.dst_port;
226                                 }
227                                 t++;
228                         } else if (!tcp_spec && !tcp_mask) {
229                                 list[t].type = ICE_TCP_IL;
230                         }
231                         break;
232
233                 case RTE_FLOW_ITEM_TYPE_SCTP:
234                         sctp_spec = item->spec;
235                         sctp_mask = item->mask;
236                         if (sctp_spec && sctp_mask) {
237                                 list[t].type = ICE_SCTP_IL;
238                                 if (sctp_mask->hdr.src_port == UINT16_MAX) {
239                                         list[t].h_u.sctp_hdr.src_port =
240                                                 sctp_spec->hdr.src_port;
241                                         list[t].m_u.sctp_hdr.src_port =
242                                                 sctp_mask->hdr.src_port;
243                                 }
244                                 if (sctp_mask->hdr.dst_port == UINT16_MAX) {
245                                         list[t].h_u.sctp_hdr.dst_port =
246                                                 sctp_spec->hdr.dst_port;
247                                         list[t].m_u.sctp_hdr.dst_port =
248                                                 sctp_mask->hdr.dst_port;
249                                 }
250                                 t++;
251                         } else if (!sctp_spec && !sctp_mask) {
252                                 list[t].type = ICE_SCTP_IL;
253                         }
254                         break;
255
256                 case RTE_FLOW_ITEM_TYPE_VXLAN:
257                         vxlan_spec = item->spec;
258                         vxlan_mask = item->mask;
259                         tunnel_valid = 1;
260                         if (vxlan_spec && vxlan_mask) {
261                                 list[t].type = ICE_VXLAN;
262                                 if (vxlan_mask->vni[0] == UINT8_MAX &&
263                                         vxlan_mask->vni[1] == UINT8_MAX &&
264                                         vxlan_mask->vni[2] == UINT8_MAX) {
265                                         list[t].h_u.tnl_hdr.vni =
266                                                 (vxlan_spec->vni[2] << 16) |
267                                                 (vxlan_spec->vni[1] << 8) |
268                                                 vxlan_spec->vni[0];
269                                         list[t].m_u.tnl_hdr.vni =
270                                                 UINT32_MAX;
271                                 }
272                                 t++;
273                         } else if (!vxlan_spec && !vxlan_mask) {
274                                 list[t].type = ICE_VXLAN;
275                         }
276                         break;
277
278                 case RTE_FLOW_ITEM_TYPE_NVGRE:
279                         nvgre_spec = item->spec;
280                         nvgre_mask = item->mask;
281                         tunnel_valid = 1;
282                         if (nvgre_spec && nvgre_mask) {
283                                 list[t].type = ICE_NVGRE;
284                                 if (nvgre_mask->tni[0] == UINT8_MAX &&
285                                         nvgre_mask->tni[1] == UINT8_MAX &&
286                                         nvgre_mask->tni[2] == UINT8_MAX) {
287                                         list[t].h_u.nvgre_hdr.tni_flow =
288                                                 (nvgre_spec->tni[2] << 16) |
289                                                 (nvgre_spec->tni[1] << 8) |
290                                                 nvgre_spec->tni[0];
291                                         list[t].m_u.nvgre_hdr.tni_flow =
292                                                 UINT32_MAX;
293                                 }
294                                 t++;
295                         } else if (!nvgre_spec && !nvgre_mask) {
296                                 list[t].type = ICE_NVGRE;
297                         }
298                         break;
299
300                 case RTE_FLOW_ITEM_TYPE_VOID:
301                 case RTE_FLOW_ITEM_TYPE_END:
302                         break;
303
304                 default:
305                         rte_flow_error_set(error, EINVAL,
306                                    RTE_FLOW_ERROR_TYPE_ITEM, actions,
307                                    "Invalid pattern item.");
308                         goto out;
309                 }
310         }
311
312         *lkups_num = t;
313
314         return 0;
315 out:
316         return -rte_errno;
317 }
318
319 /* By now ice switch filter action code implement only
320  * supports QUEUE or DROP.
321  */
322 static int
323 ice_parse_switch_action(struct ice_pf *pf,
324                                  const struct rte_flow_action *actions,
325                                  struct rte_flow_error *error,
326                                  struct ice_adv_rule_info *rule_info)
327 {
328         struct ice_vsi *vsi = pf->main_vsi;
329         const struct rte_flow_action_queue *act_q;
330         uint16_t base_queue;
331         const struct rte_flow_action *action;
332         enum rte_flow_action_type action_type;
333
334         base_queue = pf->base_queue;
335         for (action = actions; action->type !=
336                         RTE_FLOW_ACTION_TYPE_END; action++) {
337                 action_type = action->type;
338                 switch (action_type) {
339                 case RTE_FLOW_ACTION_TYPE_QUEUE:
340                         act_q = action->conf;
341                         rule_info->sw_act.fltr_act =
342                                 ICE_FWD_TO_Q;
343                         rule_info->sw_act.fwd_id.q_id =
344                                 base_queue + act_q->index;
345                         break;
346
347                 case RTE_FLOW_ACTION_TYPE_DROP:
348                         rule_info->sw_act.fltr_act =
349                                 ICE_DROP_PACKET;
350                         break;
351
352                 case RTE_FLOW_ACTION_TYPE_VOID:
353                         break;
354
355                 default:
356                         rte_flow_error_set(error,
357                                 EINVAL,
358                                 RTE_FLOW_ERROR_TYPE_ITEM,
359                                 actions,
360                                 "Invalid action type");
361                         return -rte_errno;
362                 }
363         }
364
365         rule_info->sw_act.vsi_handle = vsi->idx;
366         rule_info->rx = 1;
367         rule_info->sw_act.src = vsi->idx;
368         rule_info->priority = 5;
369
370         return 0;
371 }
372
373 static int
374 ice_switch_rule_set(struct ice_pf *pf,
375                         struct ice_adv_lkup_elem *list,
376                         uint16_t lkups_cnt,
377                         struct ice_adv_rule_info *rule_info,
378                         struct rte_flow *flow,
379                         struct rte_flow_error *error)
380 {
381         struct ice_hw *hw = ICE_PF_TO_HW(pf);
382         int ret;
383         struct ice_rule_query_data rule_added = {0};
384         struct ice_rule_query_data *filter_ptr;
385
386         if (lkups_cnt > ICE_MAX_CHAIN_WORDS) {
387                 rte_flow_error_set(error, EINVAL,
388                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
389                         "item number too large for rule");
390                 return -rte_errno;
391         }
392         if (!list) {
393                 rte_flow_error_set(error, EINVAL,
394                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
395                         "lookup list should not be NULL");
396                 return -rte_errno;
397         }
398
399         ret = ice_add_adv_rule(hw, list, lkups_cnt, rule_info, &rule_added);
400
401         if (!ret) {
402                 filter_ptr = rte_zmalloc("ice_switch_filter",
403                         sizeof(struct ice_rule_query_data), 0);
404                 if (!filter_ptr) {
405                         PMD_DRV_LOG(ERR, "failed to allocate memory");
406                         return -EINVAL;
407                 }
408                 flow->rule = filter_ptr;
409                 rte_memcpy(filter_ptr,
410                         &rule_added,
411                         sizeof(struct ice_rule_query_data));
412         }
413
414         return ret;
415 }
416
417 int
418 ice_create_switch_filter(struct ice_pf *pf,
419                         const struct rte_flow_item pattern[],
420                         const struct rte_flow_action actions[],
421                         struct rte_flow *flow,
422                         struct rte_flow_error *error)
423 {
424         int ret = 0;
425         struct ice_adv_rule_info rule_info = {0};
426         struct ice_adv_lkup_elem *list = NULL;
427         uint16_t lkups_num = 0;
428         const struct rte_flow_item *item = pattern;
429         uint16_t item_num = 0;
430         enum ice_sw_tunnel_type tun_type = ICE_NON_TUN;
431
432         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
433                 item_num++;
434                 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
435                         tun_type = ICE_SW_TUN_VXLAN;
436                 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
437                         tun_type = ICE_SW_TUN_NVGRE;
438         }
439         rule_info.tun_type = tun_type;
440
441         list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
442         if (!list) {
443                 rte_flow_error_set(error, EINVAL,
444                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
445                                    "No memory for PMD internal items");
446                 return -rte_errno;
447         }
448
449         ret = ice_parse_switch_filter(pattern, actions, error,
450                         list, &lkups_num, tun_type);
451         if (ret)
452                 goto error;
453
454         ret = ice_parse_switch_action(pf, actions, error, &rule_info);
455         if (ret)
456                 goto error;
457
458         ret = ice_switch_rule_set(pf, list, lkups_num, &rule_info, flow, error);
459         if (ret)
460                 goto error;
461
462         rte_free(list);
463         return 0;
464
465 error:
466         rte_free(list);
467
468         return -rte_errno;
469 }
470
471 int
472 ice_destroy_switch_filter(struct ice_pf *pf,
473                         struct rte_flow *flow,
474                         struct rte_flow_error *error)
475 {
476         struct ice_hw *hw = ICE_PF_TO_HW(pf);
477         int ret;
478         struct ice_rule_query_data *filter_ptr;
479
480         filter_ptr = (struct ice_rule_query_data *)
481                         flow->rule;
482
483         if (!filter_ptr) {
484                 rte_flow_error_set(error, EINVAL,
485                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
486                         "no such flow"
487                         " create by switch filter");
488                 return -rte_errno;
489         }
490
491         ret = ice_rem_adv_rule_by_id(hw, filter_ptr);
492         if (ret) {
493                 rte_flow_error_set(error, EINVAL,
494                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
495                         "fail to destroy switch filter rule");
496                 return -rte_errno;
497         }
498
499         rte_free(filter_ptr);
500         return ret;
501 }
502
/**
 * Release the rule-query data attached to a flow.
 *
 * @param rule heap pointer previously stored in flow->rule
 *             (may be NULL; rte_free(NULL) is a no-op)
 */
void
ice_free_switch_filter_rule(void *rule)
{
	/* rte_free takes void *, so no intermediate cast is needed */
	rte_free(rule);
}