diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
index 533879dd61..1538349e9c 100644
--- a/drivers/net/tap/tap_flow.c
+++ b/drivers/net/tap/tap_flow.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright 2017 6WIND S.A.
- *   Copyright 2017 Mellanox.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of 6WIND S.A. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox Technologies, Ltd
  */
 
 #include <errno.h>
@@ -151,6 +123,7 @@ enum key_status_e {
 };
 
 #define ISOLATE_HANDLE 1
+#define REMOTE_PROMISCUOUS_HANDLE 2
 
 struct rte_flow {
 	LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
@@ -297,13 +270,13 @@ static const struct tap_flow_items tap_flow_items[] = {
 		.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
 			       RTE_FLOW_ITEM_TYPE_IPV6),
 		.mask = &(const struct rte_flow_item_vlan){
-			.tpid = -1,
 			/* DEI matching is not supported */
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 			.tci = 0xffef,
 #else
 			.tci = 0xefff,
 #endif
+			.inner_type = -1,
 		},
 		.mask_sz = sizeof(struct rte_flow_item_vlan),
 		.default_mask = &rte_flow_item_vlan_mask,
@@ -564,18 +537,20 @@ tap_flow_create_eth(const struct rte_flow_item *item, void *data)
 	if (!flow)
 		return 0;
 	msg = &flow->msg;
-	if (!is_zero_ether_addr(&spec->dst)) {
-		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+	if (!rte_is_zero_ether_addr(&mask->dst)) {
+		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
+			       RTE_ETHER_ADDR_LEN,
 			   &spec->dst.addr_bytes);
 		tap_nlattr_add(&msg->nh,
-			       TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+			       TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
 			   &mask->dst.addr_bytes);
 	}
-	if (!is_zero_ether_addr(&mask->src)) {
-		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
-			   &spec->src.addr_bytes);
+	if (!rte_is_zero_ether_addr(&mask->src)) {
+		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
+			       RTE_ETHER_ADDR_LEN,
+			       &spec->src.addr_bytes);
 		tap_nlattr_add(&msg->nh,
-			       TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+			       TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
 			   &mask->src.addr_bytes);
 	}
 	return 0;
@@ -605,13 +580,19 @@ tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
 	/* use default mask if none provided */
 	if (!mask)
 		mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
-	/* TC does not support tpid masking. Only accept if exact match. */
-	if (mask->tpid && mask->tpid != 0xffff)
+	/* Outer TPID cannot be matched. */
+	if (info->eth_type)
 		return -1;
 	/* Double-tagging not supported. */
-	if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
+	if (info->vlan)
 		return -1;
 	info->vlan = 1;
+	if (mask->inner_type) {
+		/* TC does not support partial eth_type masking */
+		if (mask->inner_type != RTE_BE16(0xffff))
+			return -1;
+		info->eth_type = spec->inner_type;
+	}
 	if (!flow)
 		return 0;
 	msg = &flow->msg;
@@ -672,13 +653,13 @@ tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
 		info->eth_type = htons(ETH_P_IP);
 	if (!spec)
 		return 0;
-	if (spec->hdr.dst_addr) {
+	if (mask->hdr.dst_addr) {
 		tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
 			     spec->hdr.dst_addr);
 		tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
 			     mask->hdr.dst_addr);
 	}
-	if (spec->hdr.src_addr) {
+	if (mask->hdr.src_addr) {
 		tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
 			     spec->hdr.src_addr);
 		tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
@@ -728,13 +709,13 @@ tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
 		info->eth_type = htons(ETH_P_IPV6);
 	if (!spec)
 		return 0;
-	if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+	if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
 			   sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
 			   sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
 	}
-	if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+	if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
 			   sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
 		tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
@@ -783,10 +764,10 @@ tap_flow_create_udp(const struct rte_flow_item *item, void *data)
 	tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
 	if (!spec)
 		return 0;
-	if (spec->hdr.dst_port & mask->hdr.dst_port)
+	if (mask->hdr.dst_port)
 		tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
 			     spec->hdr.dst_port);
-	if (spec->hdr.src_port & mask->hdr.src_port)
+	if (mask->hdr.src_port)
 		tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
 			     spec->hdr.src_port);
 	return 0;
@@ -829,10 +810,10 @@ tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
 	tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
 	if (!spec)
 		return 0;
-	if (spec->hdr.dst_port & mask->hdr.dst_port)
+	if (mask->hdr.dst_port)
 		tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
 			     spec->hdr.dst_port);
-	if (spec->hdr.src_port & mask->hdr.src_port)
+	if (mask->hdr.src_port)
 		tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
 			     spec->hdr.src_port);
 	return 0;
@@ -1060,6 +1041,12 @@ priv_flow_process(struct pmd_internals *pmd,
 	};
 	int action = 0; /* Only one action authorized for now */
 
+	if (attr->transfer) {
+		rte_flow_error_set(
+			error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+			NULL, "transfer is not supported");
+		return -rte_errno;
+	}
 	if (attr->group > MAX_GROUP) {
 		rte_flow_error_set(
 			error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -1167,6 +1154,7 @@ priv_flow_process(struct pmd_internals *pmd,
 		else
 			goto end;
 	}
+actions:
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
 		int err = 0;
 
@@ -1241,7 +1229,7 @@ priv_flow_process(struct pmd_internals *pmd,
 			if (err)
 				goto exit_action_not_supported;
 		}
-		if (flow && rss)
+		if (flow)
 			err = rss_add_actions(flow, pmd, rss, error);
 	} else {
 		goto exit_action_not_supported;
@@ -1249,6 +1237,16 @@ priv_flow_process(struct pmd_internals *pmd,
 		if (err)
 			goto exit_action_not_supported;
 	}
+	/* When fate is unknown, drop traffic. */
+	if (!action) {
+		static const struct rte_flow_action drop[] = {
+			{ .type = RTE_FLOW_ACTION_TYPE_DROP, },
+			{ .type = RTE_FLOW_ACTION_TYPE_END, },
+		};
+
+		actions = drop;
+		goto actions;
+	}
 end:
 	if (flow)
 		tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
@@ -1382,7 +1380,7 @@ tap_flow_create(struct rte_eth_dev *dev,
 				   NULL, "priority value too big");
 		goto fail;
 	}
-	flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+	flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
 	if (!flow) {
 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "cannot allocate memory for rte_flow");
@@ -1403,8 +1401,8 @@ tap_flow_create(struct rte_eth_dev *dev,
 	}
 	err = tap_nl_recv_ack(pmd->nlsk_fd);
 	if (err < 0) {
-		RTE_LOG(ERR, PMD,
-			"Kernel refused TC filter rule creation (%d): %s\n",
+		TAP_LOG(ERR,
+			"Kernel refused TC filter rule creation (%d): %s",
 			errno, strerror(errno));
 		rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL,
@@ -1418,7 +1416,7 @@ tap_flow_create(struct rte_eth_dev *dev,
 	 * to the local pmd->if_index.
 	 */
 	if (pmd->remote_if_index) {
-		remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+		remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
 		if (!remote_flow) {
 			rte_flow_error_set(
 				error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1448,8 +1446,8 @@ tap_flow_create(struct rte_eth_dev *dev,
 		}
 		err = tap_nl_recv_ack(pmd->nlsk_fd);
 		if (err < 0) {
-			RTE_LOG(ERR, PMD,
-				"Kernel refused TC filter rule creation (%d): %s\n",
+			TAP_LOG(ERR,
+				"Kernel refused TC filter rule creation (%d): %s",
 				errno, strerror(errno));
 			rte_flow_error_set(
 				error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1503,8 +1501,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
 	if (ret < 0 && errno == ENOENT)
 		ret = 0;
 	if (ret < 0) {
-		RTE_LOG(ERR, PMD,
-			"Kernel refused TC filter rule deletion (%d): %s\n",
+		TAP_LOG(ERR,
+			"Kernel refused TC filter rule deletion (%d): %s",
 			errno, strerror(errno));
 		rte_flow_error_set(
 			error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1527,8 +1525,8 @@ tap_flow_destroy_pmd(struct pmd_internals *pmd,
 		if (ret < 0 && errno == ENOENT)
 			ret = 0;
 		if (ret < 0) {
-			RTE_LOG(ERR, PMD,
-				"Kernel refused TC filter rule deletion (%d): %s\n",
+			TAP_LOG(ERR,
+				"Kernel refused TC filter rule deletion (%d): %s",
 				errno, strerror(errno));
 			rte_flow_error_set(
 				error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
@@ -1571,32 +1569,37 @@ tap_flow_isolate(struct rte_eth_dev *dev,
 		 struct rte_flow_error *error __rte_unused)
 {
 	struct pmd_internals *pmd = dev->data->dev_private;
+	struct pmd_process_private *process_private = dev->process_private;
 
+	/* normalize 'set' variable to contain 0 or 1 values */
 	if (set)
-		pmd->flow_isolate = 1;
-	else
-		pmd->flow_isolate = 0;
+		set = 1;
+	/* if already in the right isolation mode - nothing to do */
+	if ((set ^ pmd->flow_isolate) == 0)
+		return 0;
+	/* mark the isolation mode for tap_flow_implicit_create() */
+	pmd->flow_isolate = set;
 	/*
 	 * If netdevice is there, setup appropriate flow rules immediately.
 	 * Otherwise it will be set when bringing up the netdevice (tun_alloc).
 	 */
-	if (!pmd->rxq[0].fd)
+	if (!process_private->rxq_fds[0])
 		return 0;
 	if (set) {
-		struct rte_flow *flow;
+		struct rte_flow *remote_flow;
 
 		while (1) {
-			flow = LIST_FIRST(&pmd->implicit_flows);
-			if (!flow)
+			remote_flow = LIST_FIRST(&pmd->implicit_flows);
+			if (!remote_flow)
 				break;
 			/*
 			 * Remove all implicit rules on the remote.
 			 * Keep the local rule to redirect packets on TX.
 			 * Keep also the last implicit local rule: ISOLATE.
 			 */
-			if (flow->msg.t.tcm_ifindex == pmd->if_index)
+			if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
 				break;
-			if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+			if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
 				goto error;
 		}
 		/* Switch the TC rule according to pmd->flow_isolate */
@@ -1690,9 +1693,9 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
 		}
 	};
 
-	remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+	remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
 	if (!remote_flow) {
-		RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
+		TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
 		goto fail;
 	}
 	msg = &remote_flow->msg;
@@ -1720,29 +1723,39 @@ int tap_flow_implicit_create(struct pmd_internals *pmd,
 	 * The ISOLATE rule is always present and must have a static handle, as
 	 * the action is changed whether the feature is enabled (DROP) or
 	 * disabled (PASSTHRU).
+	 * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
+	 * have a static handle such that adding it twice will fail with EEXIST
+	 * with any kernel version. Remark: old kernels may falsely accept the
+	 * same REMOTE_PROMISCUOUS rules if they had different handles.
 	 */
 	if (idx == TAP_ISOLATE)
 		remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
+	else if (idx == TAP_REMOTE_PROMISC)
+		remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
 	else
 		tap_flow_set_handle(remote_flow);
 	if (priv_flow_process(pmd, attr, items, actions, NULL,
 			      remote_flow, implicit_rte_flows[idx].mirred)) {
-		RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
+		TAP_LOG(ERR, "rte flow rule validation failed");
 		goto fail;
 	}
 	err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
 	if (err < 0) {
-		RTE_LOG(ERR, PMD, "Failure sending nl request\n");
+		TAP_LOG(ERR, "Failure sending nl request");
 		goto fail;
 	}
 	err = tap_nl_recv_ack(pmd->nlsk_fd);
 	if (err < 0) {
-		RTE_LOG(ERR, PMD,
-			"Kernel refused TC filter rule creation (%d): %s\n",
+		/* Silently ignore re-entering existing rule */
+		if (errno == EEXIST)
+			goto success;
+		TAP_LOG(ERR,
+			"Kernel refused TC filter rule creation (%d): %s",
 			errno, strerror(errno));
 		goto fail;
 	}
 	LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
+success:
 	return 0;
 fail:
 	if (remote_flow)
@@ -1797,9 +1810,10 @@ tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
 }
 
 #define MAX_RSS_KEYS 256
+#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
 #define SEC_NAME_CLS_Q "cls_q"
 
-const char *sec_name[SEC_MAX] = {
+static const char *sec_name[SEC_MAX] = {
 	[SEC_L3_L4] = "l3_l4",
 };
 
@@ -1852,8 +1866,8 @@ static int rss_enable(struct pmd_internals *pmd,
 			      sizeof(struct rss_key),
 			      MAX_RSS_KEYS);
 	if (pmd->map_fd < 0) {
-		RTE_LOG(ERR, PMD,
-			"Failed to create BPF map (%d): %s\n",
+		TAP_LOG(ERR,
+			"Failed to create BPF map (%d): %s",
 			errno, strerror(errno));
 		rte_flow_error_set(
 			error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1870,7 +1884,7 @@ static int rss_enable(struct pmd_internals *pmd,
 	for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
 		pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
 		if (pmd->bpf_fd[i] < 0) {
-			RTE_LOG(ERR, PMD,
+			TAP_LOG(ERR,
 				"Failed to load BPF section %s for queue %d",
 				SEC_NAME_CLS_Q, i);
 			rte_flow_error_set(
@@ -1882,9 +1896,9 @@ static int rss_enable(struct pmd_internals *pmd,
 			return -ENOTSUP;
 		}
 
-		rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+		rss_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
 		if (!rss_flow) {
-			RTE_LOG(ERR, PMD,
+			TAP_LOG(ERR,
 				"Cannot allocate memory for rte_flow");
 			return -1;
 		}
@@ -1927,8 +1941,8 @@ static int rss_enable(struct pmd_internals *pmd,
 			return -1;
 		err = tap_nl_recv_ack(pmd->nlsk_fd);
 		if (err < 0) {
-			RTE_LOG(ERR, PMD,
-				"Kernel refused TC filter rule creation (%d): %s\n",
+			TAP_LOG(ERR,
+				"Kernel refused TC filter rule creation (%d): %s",
 				errno, strerror(errno));
 			return err;
 		}
@@ -1953,38 +1967,63 @@ static int rss_enable(struct pmd_internals *pmd,
 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
 {
 	__u32 i;
-	int err = -1;
+	int err = 0;
 	static __u32 num_used_keys;
 	static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
 	static __u32 rss_keys_initialized;
+	__u32 key;
 
 	switch (cmd) {
 	case KEY_CMD_GET:
-		if (!rss_keys_initialized)
+		if (!rss_keys_initialized) {
+			err = -1;
 			break;
+		}
 
-		if (num_used_keys == RTE_DIM(rss_keys))
+		if (num_used_keys == RTE_DIM(rss_keys)) {
+			err = -1;
 			break;
+		}
 
 		*key_idx = num_used_keys % RTE_DIM(rss_keys);
 		while (rss_keys[*key_idx] == KEY_STAT_USED)
 			*key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
 
 		rss_keys[*key_idx] = KEY_STAT_USED;
+
+		/*
+		 * Add an offset to key_idx in order to handle a case of
+		 * RSS and non RSS flows mixture.
+		 * If a non RSS flow is destroyed it has an eBPF map
+		 * index 0 (initialized on flow creation) and might
+		 * unintentionally remove RSS entry 0 from eBPF map.
+		 * To avoid this issue, add an offset to the real index
+		 * during a KEY_CMD_GET operation and subtract this offset
+		 * during a KEY_CMD_RELEASE operation in order to restore
+		 * the real index.
+		 */
+		*key_idx += KEY_IDX_OFFSET;
 		num_used_keys++;
-		err = 0;
 		break;
 
 	case KEY_CMD_RELEASE:
-		if (!rss_keys_initialized) {
-			err = 0;
+		if (!rss_keys_initialized)
+			break;
+
+		/*
+		 * Subtract offset to restore real key index
+		 * If a non RSS flow is falsely trying to release map
+		 * entry 0 - the offset subtraction will calculate the real
+		 * map index as an out-of-range value and the release operation
+		 * will be silently ignored.
+ */ + key = *key_idx - KEY_IDX_OFFSET; + if (key >= RTE_DIM(rss_keys)) break; - } - if (rss_keys[*key_idx] == KEY_STAT_USED) { - rss_keys[*key_idx] = KEY_STAT_AVAILABLE; + if (rss_keys[key] == KEY_STAT_USED) { + rss_keys[key] = KEY_STAT_AVAILABLE; num_used_keys--; - err = 0; } break; @@ -1994,7 +2033,6 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx) rss_keys_initialized = 1; num_used_keys = 0; - err = 0; break; case KEY_CMD_DEINIT: @@ -2003,7 +2041,6 @@ static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx) rss_keys_initialized = 0; num_used_keys = 0; - err = 0; break; default: @@ -2032,11 +2069,21 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, struct rte_flow_error *error) { /* 4096 is the maximum number of instructions for a BPF program */ - int i; + unsigned int i; int err; struct rss_key rss_entry = { .hash_fields = 0, .key_size = 0 }; + /* Check supported RSS features */ + if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "non-default RSS hash functions are not supported"); + if (rss->level) + return rte_flow_error_set + (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "a nonzero RSS encapsulation level is not supported"); + /* Get a new map key for a new RSS rule */ err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx); if (err < 0) { @@ -2048,8 +2095,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, } /* Update RSS map entry with queues */ - rss_entry.nb_queues = rss->num; - for (i = 0; i < rss->num; i++) + rss_entry.nb_queues = rss->queue_num; + for (i = 0; i < rss->queue_num; i++) rss_entry.queues[i] = rss->queue[i]; rss_entry.hash_fields = (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4); @@ -2059,8 +2106,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, &flow->key_idx, &rss_entry); if (err) { - RTE_LOG(ERR, PMD, - "Failed to update BPF map entry #%u (%d): %s\n", + TAP_LOG(ERR, + "Failed to update BPF map entry #%u (%d): %s", flow->key_idx, errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -2078,8 +2125,8 @@ static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd, flow->bpf_fd[SEC_L3_L4] = tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd); if (flow->bpf_fd[SEC_L3_L4] < 0) { - RTE_LOG(ERR, PMD, - "Failed to load BPF section %s (%d): %s\n", + TAP_LOG(ERR, + "Failed to load BPF section %s (%d): %s", sec_name[SEC_L3_L4], errno, strerror(errno)); rte_flow_error_set( error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, @@ -2140,9 +2187,8 @@ tap_dev_filter_ctrl(struct rte_eth_dev *dev, *(const void **)arg = &tap_flow_ops; return 0; default: - RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n", - (void *)dev, filter_type); + TAP_LOG(ERR, "%p: filter type (%d) not supported", + dev, filter_type); } return -EINVAL; } -