2 * Copyright (c) 2017 Solarflare Communications Inc.
5 * This software was jointly developed between OKTET Labs (under contract
6 * for Solarflare) and Solarflare Communications, Inc.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright notice,
14 * this list of conditions and the following disclaimer in the documentation
15 * and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include <rte_tailq.h>
31 #include <rte_common.h>
32 #include <rte_ethdev.h>
33 #include <rte_eth_ctrl.h>
34 #include <rte_ether.h>
36 #include <rte_flow_driver.h>
42 #include "sfc_filter.h"
47 * At the moment, the flow API is implemented in such a manner that each
48 * flow rule is converted to a hardware filter.
49 * All elements of flow rule (attributes, pattern items, actions)
50 * correspond to one or more fields in the efx_filter_spec_s structure
51 * that is responsible for the hardware filter.
/*
 * Classification of pattern items by protocol layer. Used by
 * sfc_flow_parse_pattern() to validate item ordering.
 * NOTE(review): additional enumerators (e.g. SFC_FLOW_ITEM_L2,
 * referenced by sfc_flow_items[] below) are not visible here.
 */
54 enum sfc_flow_item_layers {
/* Item may appear after any layer (e.g. VOID) */
55 SFC_FLOW_ITEM_ANY_LAYER,
/* Item must be the first protocol layer in the pattern */
56 SFC_FLOW_ITEM_START_LAYER,
/*
 * Per-item parse callback type: convert one flow pattern item into
 * fields of the EFX hardware filter specification. Returns 0 on
 * success; on failure reports details via the rte_flow error.
 */
60 typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
61 efx_filter_spec_t *spec,
62 struct rte_flow_error *error);
/*
 * Descriptor binding one rte_flow item type to its layer constraints
 * and parse callback; instances live in the sfc_flow_items[] table.
 */
64 struct sfc_flow_item {
65 enum rte_flow_item_type type; /* Type of item */
66 enum sfc_flow_item_layers layer; /* Layer of item */
67 enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
68 sfc_flow_item_parse *parse; /* Parsing function */
/* Forward declarations of the item parse callbacks defined below. */
71 static sfc_flow_item_parse sfc_flow_parse_void;
72 static sfc_flow_item_parse sfc_flow_parse_eth;
/*
 * Return B_TRUE if all 'size' bytes of 'buf' are zero, B_FALSE
 * otherwise. Used to decide whether an item's "last" field carries
 * any range information.
 */
75 sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
/* Accumulate bytes into 'sum'; loop body is elided in this view */
80 for (i = 0; i < size; i++)
83 return (sum == 0) ? B_TRUE : B_FALSE;
/*
 * Common validation for a flow pattern item:
 *  - "mask"/"last" may only be present if "spec" is present;
 *  - if the item has no mask, def_mask is used (and must exist);
 *  - "last" must not describe a range ("ranging" is unsupported);
 *  - neither spec nor mask may request matching on bits outside
 *    supp_mask.
 * On success *spec_ptr and *mask_ptr are set for the caller to parse.
 * On failure an rte_flow error is set (return paths elided here).
 */
87 * Validate item and prepare structures spec and mask for parsing
90 sfc_flow_parse_init(const struct rte_flow_item *item,
91 const void **spec_ptr,
92 const void **mask_ptr,
93 const void *supp_mask,
96 struct rte_flow_error *error)
/* Item pointer itself missing — nothing to attach the error to */
106 rte_flow_error_set(error, EINVAL,
107 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
/* A mask or range without a spec is meaningless */
112 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
113 rte_flow_error_set(error, EINVAL,
114 RTE_FLOW_ERROR_TYPE_ITEM, item,
115 "Mask or last is set without spec");
120 * If "mask" is not set, default mask is used,
121 * but if default mask is NULL, "mask" should be set
123 if (item->mask == NULL) {
124 if (def_mask == NULL) {
125 rte_flow_error_set(error, EINVAL,
126 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
127 "Mask should be specified");
131 mask = (const uint8_t *)def_mask;
133 mask = (const uint8_t *)item->mask;
136 spec = (const uint8_t *)item->spec;
137 last = (const uint8_t *)item->last;
/*
 * A non-trivial "last" (neither all-zero nor identical to "spec")
 * would describe a range, which the hardware filter cannot express.
 */
143 * If field values in "last" are either 0 or equal to the corresponding
144 * values in "spec" then they are ignored
147 !sfc_flow_is_zero(last, size) &&
148 memcmp(last, spec, size) != 0) {
149 rte_flow_error_set(error, ENOTSUP,
150 RTE_FLOW_ERROR_TYPE_ITEM, item,
151 "Ranging is not supported");
/* Internal contract: every caller must supply a supported-fields mask */
155 if (supp_mask == NULL) {
156 rte_flow_error_set(error, EINVAL,
157 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
158 "Supported mask for item should be specified");
162 /* Check that mask and spec do not ask for more match bits than supp_mask allows */
163 for (i = 0; i < size; i++) {
164 match = spec[i] | mask[i];
165 supp = ((const uint8_t *)supp_mask)[i];
/* Any bit set in spec/mask but clear in supp_mask is unsupported */
167 if ((match | supp) != supp) {
168 rte_flow_error_set(error, ENOTSUP,
169 RTE_FLOW_ERROR_TYPE_ITEM, item,
170 "Item's field is not supported");
183 * Masking is not supported, so masks in items should be either
184 * full or empty (zeroed) and set only for supported fields which
185 * are specified in the supp_mask.
/*
 * VOID items carry no match information: accept the item and leave
 * the EFX filter specification untouched.
 */
189 sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
190 __rte_unused efx_filter_spec_t *efx_spec,
191 __rte_unused struct rte_flow_error *error)
197 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
200 * Item specification. Only source and destination addresses and
201 * Ethernet type fields are supported. If the mask is NULL, default
202 * mask will be used. Ranging is not supported.
203 * @param efx_spec[in, out]
204 * EFX filter specification to update.
 * @param error[out]
206 * Perform verbose error reporting if not NULL.
209 sfc_flow_parse_eth(const struct rte_flow_item *item,
210 efx_filter_spec_t *efx_spec,
211 struct rte_flow_error *error)
214 const struct rte_flow_item_eth *spec = NULL;
215 const struct rte_flow_item_eth *mask = NULL;
/* Only full-byte matching on dst/src MAC (and EtherType) is supported */
216 const struct rte_flow_item_eth supp_mask = {
217 .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
218 .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
/* Common validation; fills spec/mask (mask defaults to rte_flow_item_eth_mask) */
222 rc = sfc_flow_parse_init(item,
223 (const void **)&spec,
224 (const void **)&mask,
226 &rte_flow_item_eth_mask,
227 sizeof(struct rte_flow_item_eth),
232 /* If "spec" is not set, could be any Ethernet */
/* Destination MAC: either fully masked (match) or all-zero mask (ignore) */
236 if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
237 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
238 rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
/* Partial dst mask: fall through to the bad-mask error below */
240 } else if (!is_zero_ether_addr(&mask->dst)) {
/* Source MAC: same all-or-nothing masking rule */
244 if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
245 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
246 rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
248 } else if (!is_zero_ether_addr(&mask->src)) {
253 * Ether type is in big-endian byte order in item and
254 * in little-endian in efx_spec, so byte swap is used
256 if (mask->type == supp_mask.type) {
257 efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
258 efx_spec->efs_ether_type = rte_bswap16(spec->type);
259 } else if (mask->type != 0) {
/* Shared failure path for any partially-masked field above */
266 rte_flow_error_set(error, EINVAL,
267 RTE_FLOW_ERROR_TYPE_ITEM, item,
268 "Bad mask in the ETH pattern item");
/*
 * Table of supported pattern items: each entry couples an rte_flow
 * item type with its layer-ordering constraints and parse callback.
 * Searched linearly by sfc_flow_get_item().
 */
272 static const struct sfc_flow_item sfc_flow_items[] = {
274 .type = RTE_FLOW_ITEM_TYPE_VOID,
275 .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
276 .layer = SFC_FLOW_ITEM_ANY_LAYER,
277 .parse = sfc_flow_parse_void,
281 .type = RTE_FLOW_ITEM_TYPE_ETH,
281 .prev_layer = SFC_FLOW_ITEM_START_LAYER,
282 .layer = SFC_FLOW_ITEM_L2,
283 .parse = sfc_flow_parse_eth,
288 * Protocol-independent flow API support
/*
 * Validate flow attributes and record them in the filter spec.
 * Only default group/priority and ingress-only rules are accepted;
 * on success the spec is marked as an RX filter with the default
 * RSS context.
 */
291 sfc_flow_parse_attr(const struct rte_flow_attr *attr,
292 struct rte_flow *flow,
293 struct rte_flow_error *error)
/* Missing attr pointer altogether */
296 rte_flow_error_set(error, EINVAL,
297 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
301 if (attr->group != 0) {
302 rte_flow_error_set(error, ENOTSUP,
303 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
304 "Groups are not supported");
307 if (attr->priority != 0) {
308 rte_flow_error_set(error, ENOTSUP,
309 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
310 "Priorities are not supported");
313 if (attr->egress != 0) {
314 rte_flow_error_set(error, ENOTSUP,
315 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
316 "Egress is not supported");
319 if (attr->ingress == 0) {
320 rte_flow_error_set(error, ENOTSUP,
321 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
322 "Only ingress is supported");
/* All checks passed: the rule describes an RX filter */
326 flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
327 flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
332 /* Get item from array sfc_flow_items */
/*
 * Linear search of sfc_flow_items[] by item type.
 * NOTE(review): the not-found path is elided in this view —
 * presumably returns NULL (callers treat a failed lookup as an
 * unsupported item).
 */
333 static const struct sfc_flow_item *
334 sfc_flow_get_item(enum rte_flow_item_type type)
338 for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
339 if (sfc_flow_items[i].type == type)
340 return &sfc_flow_items[i];
/*
 * Walk the pattern item array up to the END item, dispatching each
 * item to its parse callback from sfc_flow_items[] and enforcing the
 * layer-ordering constraints, accumulating match fields in flow->spec.
 */
346 sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
347 struct rte_flow *flow,
348 struct rte_flow_error *error)
/* Start with "any layer" so leading items are unconstrained */
351 unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
352 const struct sfc_flow_item *item;
354 if (pattern == NULL) {
355 rte_flow_error_set(error, EINVAL,
356 RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
361 for (; pattern != NULL &&
362 pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
363 item = sfc_flow_get_item(pattern->type);
/* Item type not present in sfc_flow_items[] */
365 rte_flow_error_set(error, ENOTSUP,
366 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
367 "Unsupported pattern item");
372 * Omitting one or several protocol layers at the beginning
373 * of pattern is supported
375 if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
376 prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
377 item->prev_layer != prev_layer) {
378 rte_flow_error_set(error, ENOTSUP,
379 RTE_FLOW_ERROR_TYPE_ITEM, pattern,
380 "Unexpected sequence of pattern items");
/* Delegate item-specific conversion into the filter spec */
384 rc = item->parse(pattern, &flow->spec, error);
388 if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
389 prev_layer = item->layer;
/* Loop can only exit with pattern == NULL if no END item was seen */
392 if (pattern == NULL) {
393 rte_flow_error_set(error, EINVAL,
394 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
/*
 * Resolve a QUEUE action: validate the queue index against the
 * adapter's RX queue count and record the corresponding hardware
 * DMA queue id in the filter spec.
 * NOTE(review): the bad-index return path is elided — presumably a
 * non-zero rc that the caller maps to a "Bad QUEUE action" error.
 */
403 sfc_flow_parse_queue(struct sfc_adapter *sa,
404 const struct rte_flow_action_queue *queue,
405 struct rte_flow *flow)
409 if (queue->index >= sa->rxq_count)
412 rxq = sa->rxq_info[queue->index].rxq;
413 flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
/*
 * Walk the action array up to the END action. Only VOID and QUEUE
 * are supported; at least one fate-deciding action (QUEUE) must be
 * present, tracked via is_specified.
 */
419 sfc_flow_parse_actions(struct sfc_adapter *sa,
420 const struct rte_flow_action actions[],
421 struct rte_flow *flow,
422 struct rte_flow_error *error)
425 boolean_t is_specified = B_FALSE;
427 if (actions == NULL) {
428 rte_flow_error_set(error, EINVAL,
429 RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
434 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
435 switch (actions->type) {
/* VOID actions are no-ops */
436 case RTE_FLOW_ACTION_TYPE_VOID:
439 case RTE_FLOW_ACTION_TYPE_QUEUE:
440 rc = sfc_flow_parse_queue(sa, actions->conf, flow);
442 rte_flow_error_set(error, EINVAL,
443 RTE_FLOW_ERROR_TYPE_ACTION, actions,
/* A fate-deciding action has been seen */
448 is_specified = B_TRUE;
/* Any other action type is rejected */
452 rte_flow_error_set(error, ENOTSUP,
453 RTE_FLOW_ERROR_TYPE_ACTION, actions,
454 "Action is not supported");
/* Reached END without a QUEUE action */
460 rte_flow_error_set(error, EINVAL,
461 RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
462 "Action is unspecified");
/*
 * Full rule parse: zero the filter spec, then parse attributes,
 * pattern and actions in turn, and finally verify that the resulting
 * combination of match flags is supported by the hardware filter
 * table. Used by both validate and create entry points.
 */
470 sfc_flow_parse(struct rte_eth_dev *dev,
471 const struct rte_flow_attr *attr,
472 const struct rte_flow_item pattern[],
473 const struct rte_flow_action actions[],
474 struct rte_flow *flow,
475 struct rte_flow_error *error)
477 struct sfc_adapter *sa = dev->data->dev_private;
/* Start from a clean spec; parsers only OR-in their fields */
480 memset(&flow->spec, 0, sizeof(flow->spec));
482 rc = sfc_flow_parse_attr(attr, flow, error);
486 rc = sfc_flow_parse_pattern(pattern, flow, error);
490 rc = sfc_flow_parse_actions(sa, actions, flow, error);
/* Parsing succeeded; check the match combination against HW capabilities */
494 if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
495 rte_flow_error_set(error, ENOTSUP,
496 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
497 "Flow rule pattern is not supported");
/*
 * rte_flow .validate callback: run the full parse against a
 * throwaway stack-allocated flow, without creating anything.
 * (flow.spec is zeroed inside sfc_flow_parse.)
 */
506 sfc_flow_validate(struct rte_eth_dev *dev,
507 const struct rte_flow_attr *attr,
508 const struct rte_flow_item pattern[],
509 const struct rte_flow_action actions[],
510 struct rte_flow_error *error)
512 struct rte_flow flow;
514 return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
/*
 * rte_flow .create callback: allocate a flow, parse the rule into it,
 * link it into the adapter's flow list and — if the adapter is
 * already started — insert the hardware filter immediately.
 * Returns the flow handle, or NULL with an rte_flow error set
 * (failure-path returns are elided in this view).
 */
517 static struct rte_flow *
518 sfc_flow_create(struct rte_eth_dev *dev,
519 const struct rte_flow_attr *attr,
520 const struct rte_flow_item pattern[],
521 const struct rte_flow_action actions[],
522 struct rte_flow_error *error)
524 struct sfc_adapter *sa = dev->data->dev_private;
525 struct rte_flow *flow = NULL;
528 flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
530 rte_flow_error_set(error, ENOMEM,
531 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
532 "Failed to allocate memory");
536 rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
/*
 * NOTE(review): the flow is linked into flow_list before
 * sfc_adapter_lock() is taken below — confirm flow_list mutation
 * is safe against concurrent rte_flow calls at this point.
 */
540 TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
542 sfc_adapter_lock(sa);
/* Apply to hardware right away only if the port is running */
544 if (sa->state == SFC_ADAPTER_STARTED) {
545 rc = efx_filter_insert(sa->nic, &flow->spec);
547 rte_flow_error_set(error, rc,
548 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
549 "Failed to insert filter");
550 goto fail_filter_insert;
554 sfc_adapter_unlock(sa);
/* fail_filter_insert: undo the list linkage before freeing */
559 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
563 sfc_adapter_unlock(sa);
/*
 * Tear down one flow: remove its hardware filter if the adapter is
 * started, then unlink it from the flow list. Caller must hold the
 * adapter lock (asserted below). The flow's memory release is elided
 * in this view.
 */
570 sfc_flow_remove(struct sfc_adapter *sa,
571 struct rte_flow *flow,
572 struct rte_flow_error *error)
576 SFC_ASSERT(sfc_adapter_is_locked(sa));
578 if (sa->state == SFC_ADAPTER_STARTED) {
579 rc = efx_filter_remove(sa->nic, &flow->spec);
581 rte_flow_error_set(error, rc,
582 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
583 "Failed to destroy flow rule");
586 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
/*
 * rte_flow .destroy callback: under the adapter lock, verify the
 * handle actually belongs to this adapter's flow list, then delegate
 * the teardown to sfc_flow_remove().
 */
593 sfc_flow_destroy(struct rte_eth_dev *dev,
594 struct rte_flow *flow,
595 struct rte_flow_error *error)
597 struct sfc_adapter *sa = dev->data->dev_private;
598 struct rte_flow *flow_ptr;
601 sfc_adapter_lock(sa);
/* Validate the caller-supplied handle against the known flows */
603 TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
604 if (flow_ptr == flow)
/* Handle not found in the list */
608 rte_flow_error_set(error, rc,
609 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
610 "Failed to find flow rule to destroy");
614 rc = sfc_flow_remove(sa, flow, error);
617 sfc_adapter_unlock(sa);
/*
 * rte_flow .flush callback: under the adapter lock, remove every flow
 * from the list head until it is empty. NOTE(review): rc is
 * reassigned each iteration — confirm which removal failure (if any)
 * is ultimately reported; the return is elided in this view.
 */
623 sfc_flow_flush(struct rte_eth_dev *dev,
624 struct rte_flow_error *error)
626 struct sfc_adapter *sa = dev->data->dev_private;
627 struct rte_flow *flow;
631 sfc_adapter_lock(sa);
633 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
634 rc = sfc_flow_remove(sa, flow, error);
639 sfc_adapter_unlock(sa);
/* rte_flow driver operations exported to the ethdev flow API layer. */
644 const struct rte_flow_ops sfc_flow_ops = {
645 .validate = sfc_flow_validate,
646 .create = sfc_flow_create,
647 .destroy = sfc_flow_destroy,
648 .flush = sfc_flow_flush,
/* Initialize the (empty) flow list. Caller must hold the adapter lock. */
653 sfc_flow_init(struct sfc_adapter *sa)
655 SFC_ASSERT(sfc_adapter_is_locked(sa));
657 TAILQ_INIT(&sa->filter.flow_list);
/*
 * Drain the flow list on adapter teardown. Caller must hold the
 * adapter lock. Flows are only unlinked here; no hardware filter
 * removal is visible (freeing is elided in this view).
 */
661 sfc_flow_fini(struct sfc_adapter *sa)
663 struct rte_flow *flow;
665 SFC_ASSERT(sfc_adapter_is_locked(sa));
667 while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
668 TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
/*
 * On port stop: remove every flow's hardware filter while keeping the
 * flows in the list, so sfc_flow_start() can re-insert them. Return
 * values of efx_filter_remove() are deliberately ignored (best-effort
 * teardown). Caller must hold the adapter lock.
 */
674 sfc_flow_stop(struct sfc_adapter *sa)
676 struct rte_flow *flow;
678 SFC_ASSERT(sfc_adapter_is_locked(sa));
680 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
681 efx_filter_remove(sa->nic, &flow->spec);
/*
 * On port start: re-insert the hardware filter for every flow kept in
 * the list across a stop/start cycle. Caller must hold the adapter
 * lock. NOTE(review): the failure handling after efx_filter_insert()
 * is elided — presumably bails out before logging "done".
 */
685 sfc_flow_start(struct sfc_adapter *sa)
687 struct rte_flow *flow;
690 sfc_log_init(sa, "entry");
692 SFC_ASSERT(sfc_adapter_is_locked(sa));
694 TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
695 rc = efx_filter_insert(sa->nic, &flow->spec);
700 sfc_log_init(sa, "done");