1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Intel Corporation
5 #include "rte_eth_softnic_internals.h"
6 #include "rte_eth_softnic.h"
/*
 * Record which softnic pipeline table implements a given rte_flow group,
 * per direction: validates group_id and pipeline_name, resolves the
 * pipeline and table, then writes the mapping into the ingress or egress
 * attribute map slot for that group.
 *
 * NOTE(review): this chunk is elided — the function's return-type line,
 * the error-return statements after each validation, and the closing
 * brace are not visible here.
 */
9 flow_attr_map_set(struct pmd_internals *softnic,
12 const char *pipeline_name,
15 struct pipeline *pipeline;
16 struct flow_attr_map *map;
/* Reject out-of-range groups and a missing pipeline name. */
18 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
19 pipeline_name == NULL)
/* The named pipeline must exist and contain the requested table. */
22 pipeline = softnic_pipeline_find(softnic, pipeline_name);
23 if (pipeline == NULL ||
24 table_id >= pipeline->n_tables)
/* Select the per-direction map slot for this group. */
27 map = (ingress) ? &softnic->flow.ingress_map[group_id] :
28 &softnic->flow.egress_map[group_id];
/*
 * NOTE(review): strcpy is unbounded; presumably map->pipeline_name is
 * sized to hold any valid pipeline name — confirm against the struct
 * definition in rte_eth_softnic_internals.h.
 */
29 strcpy(map->pipeline_name, pipeline_name);
30 map->table_id = table_id;
/*
 * Look up the pipeline-table mapping for an rte_flow group in the
 * requested direction (ingress vs egress map). Returns a pointer into
 * softnic->flow; the visible code shows a bounds check on group_id
 * (NOTE(review): the failure-path return statement is elided from this
 * chunk — presumably NULL).
 */
36 struct flow_attr_map *
37 flow_attr_map_get(struct pmd_internals *softnic,
41 if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
44 return (ingress) ? &softnic->flow.ingress_map[group_id] :
45 &softnic->flow.egress_map[group_id];
/*
 * Translate rte_flow attributes into the target pipeline table: validates
 * the attr (exactly one of ingress/egress must be set), resolves the
 * group's attribute map, and returns the pipeline name and table ID via
 * the output parameters. Errors are reported through rte_flow_error_set().
 *
 * NOTE(review): elided chunk — the attr NULL check condition, error codes
 * passed to rte_flow_error_set(), and the success return are not visible.
 */
49 flow_pipeline_table_get(struct pmd_internals *softnic,
50 const struct rte_flow_attr *attr,
51 const char **pipeline_name,
53 struct rte_flow_error *error)
55 struct flow_attr_map *map;
/* Presumably a NULL-attr guard precedes this error return — elided. */
58 return rte_flow_error_set(error,
60 RTE_FLOW_ERROR_TYPE_ATTR,
/* Exactly one direction must be requested: neither set is an error... */
64 if (!attr->ingress && !attr->egress)
65 return rte_flow_error_set(error,
67 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
69 "Ingress/egress not specified");
/* ...and both set is equally invalid. */
71 if (attr->ingress && attr->egress)
72 return rte_flow_error_set(error,
74 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
76 "Setting both ingress and egress is not allowed");
/* Resolve the group -> (pipeline, table) mapping for this direction. */
78 map = flow_attr_map_get(softnic,
83 return rte_flow_error_set(error,
85 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
/* Hand the resolved mapping back to the caller. */
90 *pipeline_name = map->pipeline_name;
93 *table_id = map->table_id;
/*
 * Overlay of every rte_flow item spec/mask type this driver understands,
 * so one buffer can hold the preprocessed spec, mask, or last value of
 * any supported protocol item. The raw[] member sizes the union for
 * RAW items up to TABLE_RULE_MATCH_SIZE_MAX bytes.
 *
 * NOTE(review): the "union flow_item {" opening line and the closing
 * "};" are elided from this chunk.
 */
99 uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
100 struct rte_flow_item_eth eth;
101 struct rte_flow_item_vlan vlan;
102 struct rte_flow_item_ipv4 ipv4;
103 struct rte_flow_item_ipv6 ipv6;
104 struct rte_flow_item_icmp icmp;
105 struct rte_flow_item_udp udp;
106 struct rte_flow_item_tcp tcp;
107 struct rte_flow_item_sctp sctp;
108 struct rte_flow_item_vxlan vxlan;
109 struct rte_flow_item_e_tag e_tag;
110 struct rte_flow_item_nvgre nvgre;
111 struct rte_flow_item_mpls mpls;
112 struct rte_flow_item_gre gre;
113 struct rte_flow_item_gtp gtp;
114 struct rte_flow_item_esp esp;
115 struct rte_flow_item_geneve geneve;
116 struct rte_flow_item_vxlan_gpe vxlan_gpe;
117 struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
118 struct rte_flow_item_ipv6_ext ipv6_ext;
119 struct rte_flow_item_icmp6 icmp6;
120 struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
121 struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
122 struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
123 struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
124 struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
/*
 * All-zero default mask for RAW items (static const objects are
 * zero-initialized); used by flow_item_is_proto() for
 * RTE_FLOW_ITEM_TYPE_RAW, whose header has no predefined mask.
 */
127 static const union flow_item flow_item_raw_mask;
/*
 * Classify an rte_flow item type: for recognized protocol items, output
 * the item's default mask and its spec size and (per the default case
 * visible below returning 0/FALSE) report TRUE; for anything else return
 * FALSE. GTP/GTPC/GTPU deliberately share one mask and size.
 *
 * NOTE(review): elided chunk — the return-type line, the "switch" line,
 * and the "return 1" after each case are not visible here.
 */
130 flow_item_is_proto(enum rte_flow_item_type type,
/* RAW has no library-provided mask; use the local all-zero one. */
135 case RTE_FLOW_ITEM_TYPE_RAW:
136 *mask = &flow_item_raw_mask;
137 *size = sizeof(flow_item_raw_mask);
140 case RTE_FLOW_ITEM_TYPE_ETH:
141 *mask = &rte_flow_item_eth_mask;
142 *size = sizeof(struct rte_flow_item_eth);
145 case RTE_FLOW_ITEM_TYPE_VLAN:
146 *mask = &rte_flow_item_vlan_mask;
147 *size = sizeof(struct rte_flow_item_vlan);
150 case RTE_FLOW_ITEM_TYPE_IPV4:
151 *mask = &rte_flow_item_ipv4_mask;
152 *size = sizeof(struct rte_flow_item_ipv4);
155 case RTE_FLOW_ITEM_TYPE_IPV6:
156 *mask = &rte_flow_item_ipv6_mask;
157 *size = sizeof(struct rte_flow_item_ipv6);
160 case RTE_FLOW_ITEM_TYPE_ICMP:
161 *mask = &rte_flow_item_icmp_mask;
162 *size = sizeof(struct rte_flow_item_icmp);
165 case RTE_FLOW_ITEM_TYPE_UDP:
166 *mask = &rte_flow_item_udp_mask;
167 *size = sizeof(struct rte_flow_item_udp);
170 case RTE_FLOW_ITEM_TYPE_TCP:
171 *mask = &rte_flow_item_tcp_mask;
172 *size = sizeof(struct rte_flow_item_tcp);
175 case RTE_FLOW_ITEM_TYPE_SCTP:
176 *mask = &rte_flow_item_sctp_mask;
177 *size = sizeof(struct rte_flow_item_sctp);
180 case RTE_FLOW_ITEM_TYPE_VXLAN:
181 *mask = &rte_flow_item_vxlan_mask;
182 *size = sizeof(struct rte_flow_item_vxlan);
185 case RTE_FLOW_ITEM_TYPE_E_TAG:
186 *mask = &rte_flow_item_e_tag_mask;
187 *size = sizeof(struct rte_flow_item_e_tag);
190 case RTE_FLOW_ITEM_TYPE_NVGRE:
191 *mask = &rte_flow_item_nvgre_mask;
192 *size = sizeof(struct rte_flow_item_nvgre);
195 case RTE_FLOW_ITEM_TYPE_MPLS:
196 *mask = &rte_flow_item_mpls_mask;
197 *size = sizeof(struct rte_flow_item_mpls);
200 case RTE_FLOW_ITEM_TYPE_GRE:
201 *mask = &rte_flow_item_gre_mask;
202 *size = sizeof(struct rte_flow_item_gre);
/* All three GTP variants share the generic GTP header layout. */
205 case RTE_FLOW_ITEM_TYPE_GTP:
206 case RTE_FLOW_ITEM_TYPE_GTPC:
207 case RTE_FLOW_ITEM_TYPE_GTPU:
208 *mask = &rte_flow_item_gtp_mask;
209 *size = sizeof(struct rte_flow_item_gtp);
212 case RTE_FLOW_ITEM_TYPE_ESP:
213 *mask = &rte_flow_item_esp_mask;
214 *size = sizeof(struct rte_flow_item_esp);
217 case RTE_FLOW_ITEM_TYPE_GENEVE:
218 *mask = &rte_flow_item_geneve_mask;
219 *size = sizeof(struct rte_flow_item_geneve);
222 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
223 *mask = &rte_flow_item_vxlan_gpe_mask;
224 *size = sizeof(struct rte_flow_item_vxlan_gpe);
227 case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
228 *mask = &rte_flow_item_arp_eth_ipv4_mask;
229 *size = sizeof(struct rte_flow_item_arp_eth_ipv4);
232 case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
233 *mask = &rte_flow_item_ipv6_ext_mask;
234 *size = sizeof(struct rte_flow_item_ipv6_ext);
237 case RTE_FLOW_ITEM_TYPE_ICMP6:
238 *mask = &rte_flow_item_icmp6_mask;
239 *size = sizeof(struct rte_flow_item_icmp6);
242 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
243 *mask = &rte_flow_item_icmp6_nd_ns_mask;
244 *size = sizeof(struct rte_flow_item_icmp6_nd_ns);
247 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
248 *mask = &rte_flow_item_icmp6_nd_na_mask;
249 *size = sizeof(struct rte_flow_item_icmp6_nd_na);
252 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
253 *mask = &rte_flow_item_icmp6_nd_opt_mask;
254 *size = sizeof(struct rte_flow_item_icmp6_nd_opt);
257 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
258 *mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
259 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
262 case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
263 *mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
264 *size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
/* Unrecognized item types are not protocol items. */
267 default: return 0; /* FALSE */
/*
 * Normalize one protocol item into fixed-size spec/mask buffers:
 * - reject unsupported item types;
 * - a NULL spec (with NULL last/mask) means "match anything": zero both
 *   buffers and mark the item disabled;
 * - otherwise copy spec, take the item's mask or the protocol default,
 *   mark the item disabled if the mask is all-zero, apply mask to spec,
 *   and reject "last" values that would express a range (spec and
 *   masked-last must be byte-identical).
 *
 * NOTE(review): elided chunk — declarations of i/size/last, several
 * branch/else lines, error codes, and the success return are missing.
 */
272 flow_item_proto_preprocess(const struct rte_flow_item *item,
273 union flow_item *item_spec,
274 union flow_item *item_mask,
277 struct rte_flow_error *error)
279 const void *mask_default;
280 uint8_t *spec = (uint8_t *)item_spec;
281 uint8_t *mask = (uint8_t *)item_mask;
284 if (!flow_item_is_proto(item->type, &mask_default, &size))
285 return rte_flow_error_set(error,
287 RTE_FLOW_ERROR_TYPE_ITEM,
289 "Item type not supported");
293 /* If spec is NULL, then last and mask also have to be NULL. */
294 if (item->last || item->mask)
295 return rte_flow_error_set(error,
297 RTE_FLOW_ERROR_TYPE_ITEM,
299 "Invalid item (NULL spec with non-NULL last or mask)");
/* No spec: match-anything item, flagged disabled. */
301 memset(item_spec, 0, size);
302 memset(item_mask, 0, size);
304 *item_disabled = 1; /* TRUE */
/* Spec present: copy it, then the explicit or default mask. */
308 memcpy(spec, item->spec, size);
313 memcpy(mask, item->mask, size);
315 memcpy(mask, mask_default, size);
/* An all-zero mask means the item constrains nothing -> disabled. */
318 for (i = 0; i < size; i++)
321 *item_disabled = (i == size) ? 1 : 0;
323 /* Apply mask over spec. */
324 for (i = 0; i < size; i++)
/* "last" handling: mask it, then require equality with spec. */
332 memcpy(last, item->last, size);
333 for (i = 0; i < size; i++)
336 /* check for range */
337 for (i = 0; i < size; i++)
338 if (last[i] != spec[i])
339 return rte_flow_error_set(error,
341 RTE_FLOW_ERROR_TYPE_ITEM,
343 "Range not supported");
350 * Skip disabled protocol items and VOID items
351 * until any of the mutually exclusive conditions
352 * from the list below takes place:
353 * (A) A protocol present in the proto_mask
354 * is met (either ENABLED or DISABLED);
355 * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
356 * (C) The END item is met.
/*
 * NOTE(review): elided chunk — the opening of the comment above, the
 * function's return-type line, the offset/length accounting inside the
 * loop, and the terminating returns are not visible here.
 */
359 flow_item_skip_disabled_protos(const struct rte_flow_item **item,
362 struct rte_flow_error *error)
/* Advance *item in place so the caller resumes at the stop point. */
366 for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
367 union flow_item spec, mask;
369 int disabled = 0, status;
/* VOID items are always skipped. */
371 if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
374 status = flow_item_proto_preprocess(*item,
/* Stop condition (A)/(B): item is in proto_mask, or enabled. */
383 if ((proto_mask & (1LLU << (*item)->type)) ||
/* Bit mask selecting the IP protocol item types (IPv4 and IPv6), in the
 * proto_mask format consumed by flow_item_skip_disabled_protos(). */
396 #define FLOW_ITEM_PROTO_IP \
397 ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
398 (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
/*
 * Build an ACL table rule match from an rte_flow item list: initializes
 * the rule as TABLE_ACL with the attr's priority, skips leading VOID /
 * disabled items until an IP item, and rejects patterns whose first
 * significant item is not IPv4/IPv6.
 *
 * NOTE(review): elided chunk — the return-type line, status checks after
 * each helper call, the non-IP case labels before the visible error
 * return, and the rest of the IPv4/IPv6 field extraction are missing.
 */
401 flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
402 struct pipeline *pipeline __rte_unused,
403 struct softnic_table *table __rte_unused,
404 const struct rte_flow_attr *attr,
405 const struct rte_flow_item *item,
406 struct softnic_table_rule_match *rule_match,
407 struct rte_flow_error *error)
409 union flow_item spec, mask;
410 size_t size, length = 0;
411 int disabled = 0, status;
413 memset(rule_match, 0, sizeof(*rule_match));
414 rule_match->match_type = TABLE_ACL;
415 rule_match->match.acl.priority = attr->priority;
417 /* VOID or disabled protos only, if any. */
418 status = flow_item_skip_disabled_protos(&item,
419 FLOW_ITEM_PROTO_IP, &length, error);
/* Preprocess the first significant item; must be an IP protocol. */
424 status = flow_item_proto_preprocess(item, &spec, &mask,
425 &size, &disabled, error);
429 switch (item->type) {
431 return rte_flow_error_set(error,
433 RTE_FLOW_ERROR_TYPE_ITEM,
435 "ACL: IP protocol required");
/*
 * Dispatch rule-match construction by table match type: ACL tables are
 * delegated to flow_rule_match_acl_get(); any other match type is
 * rejected as unsupported.
 *
 * NOTE(review): elided chunk — the return-type line, the TABLE_ACL case
 * label, the forwarded argument list, and other case labels (if any)
 * preceding the default error are not visible here.
 */
440 flow_rule_match_get(struct pmd_internals *softnic,
441 struct pipeline *pipeline,
442 struct softnic_table *table,
443 const struct rte_flow_attr *attr,
444 const struct rte_flow_item *item,
445 struct softnic_table_rule_match *rule_match,
446 struct rte_flow_error *error)
448 switch (table->params.match_type) {
450 return flow_rule_match_acl_get(softnic,
459 return rte_flow_error_set(error,
461 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
463 "Unsupported pipeline table match type");
/*
 * rte_flow .validate callback: checks attr/item/action for NULL, maps
 * the attr to a pipeline table via flow_pipeline_table_get(), re-checks
 * that the pipeline and table still exist, then validates the pattern by
 * attempting to build the table rule match.
 *
 * NOTE(review): elided chunk — the return-type line, the NULL-check
 * conditions before each of the first three error returns, the error
 * messages/codes, status checks, and the final return are not visible.
 */
468 pmd_flow_validate(struct rte_eth_dev *dev,
469 const struct rte_flow_attr *attr,
470 const struct rte_flow_item item[],
471 const struct rte_flow_action action[],
472 struct rte_flow_error *error)
474 struct softnic_table_rule_match rule_match;
476 struct pmd_internals *softnic = dev->data->dev_private;
477 struct pipeline *pipeline;
478 struct softnic_table *table;
479 const char *pipeline_name = NULL;
480 uint32_t table_id = 0;
483 /* Check input parameters. */
485 return rte_flow_error_set(error,
487 RTE_FLOW_ERROR_TYPE_ATTR,
491 return rte_flow_error_set(error,
493 RTE_FLOW_ERROR_TYPE_ITEM,
498 return rte_flow_error_set(error,
500 RTE_FLOW_ERROR_TYPE_ACTION,
504 /* Identify the pipeline table to add this flow to. */
505 status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
/* The map may reference a pipeline/table deleted since it was set. */
510 pipeline = softnic_pipeline_find(softnic, pipeline_name);
511 if (pipeline == NULL)
512 return rte_flow_error_set(error,
514 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
516 "Invalid pipeline name");
518 if (table_id >= pipeline->n_tables)
519 return rte_flow_error_set(error,
521 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
523 "Invalid pipeline table ID");
525 table = &pipeline->table[table_id];
/* Validation proper: try to build the rule match from the pattern. */
528 memset(&rule_match, 0, sizeof(rule_match));
529 status = flow_rule_match_get(softnic,
542 const struct rte_flow_ops pmd_flow_ops = {
543 .validate = pmd_flow_validate,