/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * At the moment flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that is responsible for the hardware filter.
 */
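
/*
 * For illustration, a minimal rule of the kind this driver can handle
 * (all values are hypothetical): match a destination MAC address and
 * deliver matching packets to Rx queue 0.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * rte_flow_create() with these arguments produces a single hardware
 * filter matching on the local MAC address (EFX_FILTER_MATCH_LOC_MAC)
 * with the DMA queue set from the QUEUE action.
 */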

enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

/*
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */
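
/*
 * For example (illustrative values): an Ethernet destination mask of
 * ff:ff:ff:ff:ff:ff requests a match on the whole address and all-zeros
 * leaves the field unmatched, while a partial mask such as
 * ff:ff:ff:00:00:00 is rejected as unsupported.
 */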

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask cannot be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}

static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
};
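
/*
 * The prev_layer/layer pairs above drive the ordering check in
 * sfc_flow_parse_pattern(): an item is accepted only if its prev_layer
 * matches the layer reached so far (VOID is transparent, and layers may
 * be omitted at the start of the pattern).  For example, ETH followed
 * by VLAN is accepted, while VLAN followed by ETH is rejected.
 */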

/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern != NULL &&
	       pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}
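
/*
 * Convert actions to the EFX filter specification stored in the flow.
 * Only VOID and QUEUE actions are supported, and at least one QUEUE
 * action must be present.
 */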
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

fail_bad_value:
	return rc;
}
static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					   "Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}
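
/*
 * Remove the flow rule from the hardware (if the adapter is started)
 * and drop it from the flow list; the adapter lock must be held.
 */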
static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				"Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
	.query = NULL,
};
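
/*
 * These callbacks are expected to be returned by the driver's
 * filter_ctrl ethdev operation for RTE_ETH_FILTER_GENERIC requests,
 * which is how rte_flow ops are exposed in this generation of DPDK;
 * the hook itself lives outside this file.
 */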

void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}
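
/*
 * Reinsert hardware filters for all rules kept in the flow list.
 * Called on the adapter start path with the adapter lock held.
 */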
int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}