/*-
 * Copyright (c) 2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
#include "sfc_log.h"

/*
 * For now, the flow API is implemented in such a manner that each
 * flow rule is converted to a hardware filter.
 * All elements of the flow rule (attributes, pattern items, actions)
 * correspond to one or more fields in the efx_filter_spec_s structure
 * that describes the hardware filter.
 */

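/*
 * Illustrative sketch, not part of the driver: a rule matching IPv4/TCP
 * traffic and steering it to one Rx queue maps onto the filter spec
 * roughly as follows (the concrete values are made up for the example):
 *
 *	pattern: ETH / IPV4 (next_proto_id = 6, fully masked) / END
 *	action:  QUEUE (index = 3) / END
 *
 *	efs_match_flags = EFX_FILTER_MATCH_ETHER_TYPE |
 *			  EFX_FILTER_MATCH_IP_PROTO;
 *	efs_ether_type = 0x0800;	(IPv4)
 *	efs_ip_proto = 6;		(TCP)
 *	efs_dmaq_id = hardware index of Rx queue 3;
 */
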
enum sfc_flow_item_layers {
	SFC_FLOW_ITEM_ANY_LAYER,
	SFC_FLOW_ITEM_START_LAYER,
	SFC_FLOW_ITEM_L2,
	SFC_FLOW_ITEM_L3,
};

typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
				  efx_filter_spec_t *spec,
				  struct rte_flow_error *error);

struct sfc_flow_item {
	enum rte_flow_item_type type;		/* Type of item */
	enum sfc_flow_item_layers layer;	/* Layer of item */
	enum sfc_flow_item_layers prev_layer;	/* Previous layer of item */
	sfc_flow_item_parse *parse;		/* Parsing function to be used */
};

static sfc_flow_item_parse sfc_flow_parse_void;
static sfc_flow_item_parse sfc_flow_parse_eth;
static sfc_flow_item_parse sfc_flow_parse_vlan;
static sfc_flow_item_parse sfc_flow_parse_ipv4;

static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
{
	uint8_t sum = 0;
	unsigned int i;

	for (i = 0; i < size; i++)
		sum |= buf[i];

	return (sum == 0) ? B_TRUE : B_FALSE;
}

/*
 * Validate the item and prepare the spec and mask structures for parsing
 */
static int
sfc_flow_parse_init(const struct rte_flow_item *item,
		    const void **spec_ptr,
		    const void **mask_ptr,
		    const void *supp_mask,
		    const void *def_mask,
		    unsigned int size,
		    struct rte_flow_error *error)
{
	const uint8_t *spec;
	const uint8_t *mask;
	const uint8_t *last;
	uint8_t match;
	uint8_t supp;
	unsigned int i;

	if (item == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "NULL item");
		return -rte_errno;
	}

	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec");
		return -rte_errno;
	}

	/*
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set
	 */
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified");
			return -rte_errno;
		}

		mask = (const uint8_t *)def_mask;
	} else {
		mask = (const uint8_t *)item->mask;
	}

	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;

	if (spec == NULL)
		goto exit;

	/*
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored
	 */
	if (last != NULL &&
	    !sfc_flow_is_zero(last, size) &&
	    memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Ranging is not supported");
		return -rte_errno;
	}

	if (supp_mask == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Supported mask for item should be specified");
		return -rte_errno;
	}

	/* Check that mask and spec do not ask for more match than supp_mask */
	for (i = 0; i < size; i++) {
		match = spec[i] | mask[i];
		supp = ((const uint8_t *)supp_mask)[i];

		if ((match | supp) != supp) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Item's field is not supported");
			return -rte_errno;
		}
	}

exit:
	*spec_ptr = spec;
	*mask_ptr = mask;
	return 0;
}

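/*
 * Usage sketch for sfc_flow_parse_init(), mirroring the protocol parsers
 * below (illustrative only):
 *
 *	const struct rte_flow_item_eth *spec = NULL;
 *	const struct rte_flow_item_eth *mask = NULL;
 *
 *	rc = sfc_flow_parse_init(item,
 *				 (const void **)&spec,
 *				 (const void **)&mask,
 *				 &supp_mask,
 *				 &rte_flow_item_eth_mask,
 *				 sizeof(struct rte_flow_item_eth),
 *				 error);
 *
 * On success, mask points either to the item's mask or to the given
 * default, while spec may still be NULL if the application omitted it,
 * so callers must check spec before dereferencing it.
 */
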
/*
 * Masking is not supported, so masks in items should be either
 * full or empty (zeroed) and set only for supported fields which
 * are specified in the supp_mask.
 */

static int
sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
		    __rte_unused efx_filter_spec_t *efx_spec,
		    __rte_unused struct rte_flow_error *error)
{
	return 0;
}

/**
 * Convert Ethernet item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   Ethernet type fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_eth(const struct rte_flow_item *item,
		   efx_filter_spec_t *efx_spec,
		   struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_eth *spec = NULL;
	const struct rte_flow_item_eth *mask = NULL;
	const struct rte_flow_item_eth supp_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
		.type = 0xffff,
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_eth_mask,
				 sizeof(struct rte_flow_item_eth),
				 error);
	if (rc != 0)
		return rc;

	/* If "spec" is not set, could be any Ethernet */
	if (spec == NULL)
		return 0;

	if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
		rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->dst)) {
		goto fail_bad_mask;
	}

	if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
		rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
			   EFX_MAC_ADDR_LEN);
	} else if (!is_zero_ether_addr(&mask->src)) {
		goto fail_bad_mask;
	}

	/*
	 * Ether type is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used
	 */
	if (mask->type == supp_mask.type) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = rte_bswap16(spec->type);
	} else if (mask->type != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the ETH pattern item");
	return -rte_errno;
}

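/*
 * Illustrative only: an ETH item this parser accepts, matching on the
 * destination MAC address with a fully set mask (the address value is
 * made up for the example):
 *
 *	const struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x0f, 0x53, 0x00, 0x00, 0x01 },
 *	};
 *	const struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &eth_spec,
 *		.mask = &eth_mask,
 *	};
 *
 * Such an item sets EFX_FILTER_MATCH_LOC_MAC and copies the address into
 * efs_loc_mac; a partially set MAC mask is rejected.
 */
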
/**
 * Convert VLAN item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only VID field is supported.
 *   The mask can not be NULL. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_vlan(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	uint16_t vid;
	const struct rte_flow_item_vlan *spec = NULL;
	const struct rte_flow_item_vlan *mask = NULL;
	const struct rte_flow_item_vlan supp_mask = {
		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 NULL,
				 sizeof(struct rte_flow_item_vlan),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * VID is in big-endian byte order in item and
	 * in little-endian in efx_spec, so byte swap is used.
	 * If two VLAN items are included, the first matches
	 * the outer tag and the next matches the inner tag.
	 */
	if (mask->tci == supp_mask.tci) {
		vid = rte_bswap16(spec->tci);

		if (!(efx_spec->efs_match_flags &
		      EFX_FILTER_MATCH_OUTER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
			efx_spec->efs_outer_vid = vid;
		} else if (!(efx_spec->efs_match_flags &
			     EFX_FILTER_MATCH_INNER_VID)) {
			efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
			efx_spec->efs_inner_vid = vid;
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "More than two VLAN items");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN ID in TCI match is required");
		return -rte_errno;
	}

	return 0;
}

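/*
 * Illustrative only: in a pattern ETH / VLAN / VLAN, the first VLAN item
 * fills efs_outer_vid and the second fills efs_inner_vid. An outer tag
 * item matching VID 100 could look as follows (values made up):
 *
 *	const struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(100),
 *	};
 *	const struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
 *	};
 *
 * The mask must be given explicitly since this parser passes a NULL
 * default mask to sfc_flow_parse_init().
 */
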
/**
 * Convert IPv4 item to EFX filter specification.
 *
 * @param item[in]
 *   Item specification. Only source and destination addresses and
 *   protocol fields are supported. If the mask is NULL, default
 *   mask will be used. Ranging is not supported.
 * @param efx_spec[in, out]
 *   EFX filter specification to update.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 */
static int
sfc_flow_parse_ipv4(const struct rte_flow_item *item,
		    efx_filter_spec_t *efx_spec,
		    struct rte_flow_error *error)
{
	int rc;
	const struct rte_flow_item_ipv4 *spec = NULL;
	const struct rte_flow_item_ipv4 *mask = NULL;
	const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
	const struct rte_flow_item_ipv4 supp_mask = {
		.hdr = {
			.src_addr = 0xffffffff,
			.dst_addr = 0xffffffff,
			.next_proto_id = 0xff,
		},
	};

	rc = sfc_flow_parse_init(item,
				 (const void **)&spec,
				 (const void **)&mask,
				 &supp_mask,
				 &rte_flow_item_ipv4_mask,
				 sizeof(struct rte_flow_item_ipv4),
				 error);
	if (rc != 0)
		return rc;

	/*
	 * Filtering by IPv4 source and destination addresses requires
	 * the appropriate ETHER_TYPE in hardware filters
	 */
	if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
		efx_spec->efs_ether_type = ether_type_ipv4;
	} else if (efx_spec->efs_ether_type != ether_type_ipv4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Ethertype in pattern with IPV4 item should be appropriate");
		return -rte_errno;
	}

	if (spec == NULL)
		return 0;

	/*
	 * IPv4 addresses are in big-endian byte order in item and in
	 * efx_spec, so they are used as is
	 */
	if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
		efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
	} else if (mask->hdr.src_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
		efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
	} else if (mask->hdr.dst_addr != 0) {
		goto fail_bad_mask;
	}

	if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
		efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
		efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
	} else if (mask->hdr.next_proto_id != 0) {
		goto fail_bad_mask;
	}

	return 0;

fail_bad_mask:
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, item,
			   "Bad mask in the IPV4 pattern item");
	return -rte_errno;
}

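/*
 * Illustrative only: an IPV4 item matching on destination address
 * 192.0.2.1 with a full mask (the address is made up for the example):
 *
 *	const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xc0000201),
 *	};
 *	const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = 0xffffffff,
 *	};
 *
 * Such an item sets EFX_FILTER_MATCH_LOC_HOST and, unless an earlier ETH
 * item already did it, EFX_FILTER_MATCH_ETHER_TYPE with the IPv4
 * ethertype.
 */
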
static const struct sfc_flow_item sfc_flow_items[] = {
	{
		.type = RTE_FLOW_ITEM_TYPE_VOID,
		.prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
		.layer = SFC_FLOW_ITEM_ANY_LAYER,
		.parse = sfc_flow_parse_void,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.prev_layer = SFC_FLOW_ITEM_START_LAYER,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_eth,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L2,
		.parse = sfc_flow_parse_vlan,
	},
	{
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.prev_layer = SFC_FLOW_ITEM_L2,
		.layer = SFC_FLOW_ITEM_L3,
		.parse = sfc_flow_parse_ipv4,
	},
};

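/*
 * Worked example of the layer checks driven by this table (illustrative
 * only) for the pattern:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, ... },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * ETH is accepted at the start and advances the layer to L2, and IPV4
 * requires L2 and advances to L3. Starting directly at IPV4 is also
 * accepted because the initial layer is SFC_FLOW_ITEM_ANY_LAYER, whereas
 * IPV4 followed by ETH is rejected as an unexpected sequence.
 */
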
/*
 * Protocol-independent flow API support
 */
static int
sfc_flow_parse_attr(const struct rte_flow_attr *attr,
		    struct rte_flow *flow,
		    struct rte_flow_error *error)
{
	if (attr == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				   "NULL attribute");
		return -rte_errno;
	}
	if (attr->group != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
				   "Groups are not supported");
		return -rte_errno;
	}
	if (attr->priority != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
				   "Priorities are not supported");
		return -rte_errno;
	}
	if (attr->egress != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
				   "Egress is not supported");
		return -rte_errno;
	}
	if (attr->ingress == 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
				   "Only ingress is supported");
		return -rte_errno;
	}

	flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
	flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;

	return 0;
}

/* Get item from array sfc_flow_items */
static const struct sfc_flow_item *
sfc_flow_get_item(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
		if (sfc_flow_items[i].type == type)
			return &sfc_flow_items[i];

	return NULL;
}

static int
sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
	const struct sfc_flow_item *item;

	if (pattern == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				   "NULL pattern");
		return -rte_errno;
	}

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
		item = sfc_flow_get_item(pattern->type);
		if (item == NULL) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unsupported pattern item");
			return -rte_errno;
		}

		/*
		 * Omitting one or several protocol layers at the beginning
		 * of the pattern is supported
		 */
		if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
		    item->prev_layer != prev_layer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM, pattern,
					   "Unexpected sequence of pattern items");
			return -rte_errno;
		}

		rc = item->parse(pattern, &flow->spec, error);
		if (rc != 0)
			return rc;

		if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
			prev_layer = item->layer;
	}

	return 0;
}

static int
sfc_flow_parse_queue(struct sfc_adapter *sa,
		     const struct rte_flow_action_queue *queue,
		     struct rte_flow *flow)
{
	struct sfc_rxq *rxq;

	if (queue->index >= sa->rxq_count)
		return -EINVAL;

	rxq = sa->rxq_info[queue->index].rxq;
	flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;

	return 0;
}

static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
		       const struct rte_flow_action actions[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	int rc;
	boolean_t is_specified = B_FALSE;

	if (actions == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				   "NULL actions");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_QUEUE:
			rc = sfc_flow_parse_queue(sa, actions->conf, flow);
			if (rc != 0) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION, actions,
					"Bad QUEUE action");
				return -rte_errno;
			}

			is_specified = B_TRUE;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Action is not supported");
			return -rte_errno;
		}
	}

	if (!is_specified) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
				   "Action is unspecified");
		return -rte_errno;
	}

	return 0;
}

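/*
 * Illustrative only: the minimal action list this parser accepts is a
 * single QUEUE action terminated by END; VOID actions are skipped:
 *
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VOID },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * A list without a QUEUE action fails with "Action is unspecified".
 */
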
static int
sfc_flow_parse(struct rte_eth_dev *dev,
	       const struct rte_flow_attr *attr,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	memset(&flow->spec, 0, sizeof(flow->spec));

	rc = sfc_flow_parse_attr(attr, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_pattern(pattern, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	rc = sfc_flow_parse_actions(sa, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Flow rule pattern is not supported");
		return -rte_errno;
	}

fail_bad_value:
	return rc;
}

static int
sfc_flow_validate(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow flow;

	return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}

static struct rte_flow *
sfc_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow = NULL;
	int rc;

	flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to allocate memory");
		goto fail_no_mem;
	}

	rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
	if (rc != 0)
		goto fail_bad_value;

	sfc_adapter_lock(sa);

	TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0) {
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					   "Failed to insert filter");
			goto fail_filter_insert;
		}
	}

	sfc_adapter_unlock(sa);

	return flow;

fail_filter_insert:
	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	sfc_adapter_unlock(sa);

fail_bad_value:
	rte_free(flow);

fail_no_mem:
	return NULL;
}

static int
sfc_flow_remove(struct sfc_adapter *sa,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int rc = 0;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_filter_remove(sa->nic, &flow->spec);
		if (rc != 0)
			rte_flow_error_set(error, rc,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					   "Failed to destroy flow rule");
	}

	TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
	rte_free(flow);

	return rc;
}

static int
sfc_flow_destroy(struct rte_eth_dev *dev,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow_ptr;
	int rc = EINVAL;

	sfc_adapter_lock(sa);

	TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
		if (flow_ptr == flow)
			rc = 0;
	}
	if (rc != 0) {
		rte_flow_error_set(error, rc,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to find flow rule to destroy");
		goto fail_bad_value;
	}

	rc = sfc_flow_remove(sa, flow, error);

fail_bad_value:
	sfc_adapter_unlock(sa);

	return -rc;
}

static int
sfc_flow_flush(struct rte_eth_dev *dev,
	       struct rte_flow_error *error)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_flow *flow;
	int rc = 0;
	int ret = 0;

	sfc_adapter_lock(sa);

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		rc = sfc_flow_remove(sa, flow, error);
		if (rc != 0)
			ret = rc;
	}

	sfc_adapter_unlock(sa);

	return -ret;
}

const struct rte_flow_ops sfc_flow_ops = {
	.validate = sfc_flow_validate,
	.create = sfc_flow_create,
	.destroy = sfc_flow_destroy,
	.flush = sfc_flow_flush,
};

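/*
 * Sketch of how an application reaches these callbacks through the
 * generic flow API (illustrative only; port_id, attr, pattern and
 * actions are assumed to be set up as in the examples above):
 *
 *	struct rte_flow_error flow_error;
 *	struct rte_flow *handle;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions,
 *			      &flow_error) == 0)
 *		handle = rte_flow_create(port_id, &attr, pattern,
 *					 actions, &flow_error);
 */
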
void
sfc_flow_init(struct sfc_adapter *sa)
{
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_INIT(&sa->filter.flow_list);
}

void
sfc_flow_fini(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
		TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
		rte_free(flow);
	}
}

void
sfc_flow_stop(struct sfc_adapter *sa)
{
	struct rte_flow *flow;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
		efx_filter_remove(sa->nic, &flow->spec);
}

int
sfc_flow_start(struct sfc_adapter *sa)
{
	struct rte_flow *flow;
	int rc = 0;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
		rc = efx_filter_insert(sa->nic, &flow->spec);
		if (rc != 0)
			goto fail_bad_flow;
	}

	sfc_log_init(sa, "done");

fail_bad_flow:
	return rc;
}