if (!flow)
return 0;
msg = &flow->msg;
- if (!is_zero_ether_addr(&spec->dst)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
+ RTE_ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
&mask->dst.addr_bytes);
}
- if (!is_zero_ether_addr(&mask->src)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
- &spec->src.addr_bytes);
+ if (!rte_is_zero_ether_addr(&mask->src)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
+ RTE_ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
&mask->src.addr_bytes);
}
return 0;
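/*
 * Sketch of why the gate moved from spec to mask (hypothetical rule
 * objects, not part of the patch): under rte_flow semantics a zero
 * mask means "don't care", while a zero spec under a non-zero mask is
 * a real match on the all-zero address.
 */
static const struct rte_flow_item_eth eth_spec_zero_dst = {
	.dst.addr_bytes = { 0, 0, 0, 0, 0, 0 },
};
static const struct rte_flow_item_eth eth_mask_full_dst = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};
/*
 * is_zero_ether_addr(&eth_spec_zero_dst.dst) is true, so the old test
 * silently dropped this rule; rte_is_zero_ether_addr() on the mask is
 * false, so the fixed test emits the ETH_DST key/mask pair for it.
 * The IPv4, IPv6, UDP and TCP hunks below apply the same principle.
 */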
info->eth_type = htons(ETH_P_IP);
if (!spec)
return 0;
- if (spec->hdr.dst_addr) {
+ if (mask->hdr.dst_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
- if (spec->hdr.src_addr) {
+ if (mask->hdr.src_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
mask->hdr.src_addr);
}
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
}
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
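/*
 * Sketch of the failure in the old 'spec & mask' gate (hypothetical
 * values): a rule that masks only the high byte of the port,
 *
 *	spec->hdr.dst_port = RTE_BE16(0x00ff);
 *	mask->hdr.dst_port = RTE_BE16(0xff00);
 *
 * gives spec->hdr.dst_port & mask->hdr.dst_port == 0, so the key was
 * skipped even though the application asked for a match. Gating on
 * mask->hdr.dst_port alone emits the key whenever a mask is present.
 * The TCP hunk below receives the same fix.
 */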
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
NULL, "priority value too big");
goto fail;
}
- flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate memory for rte_flow");
* to the local pmd->if_index.
*/
if (pmd->remote_if_index) {
- remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"cannot allocate memory for rte_flow");
struct rte_flow_error *error __rte_unused)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
+ /* normalize 'set' variable to contain 0 or 1 values */
if (set)
- pmd->flow_isolate = 1;
- else
- pmd->flow_isolate = 0;
+ set = 1;
+ /* if already in the right isolation mode - nothing to do */
+ if ((set ^ pmd->flow_isolate) == 0)
+ return 0;
+ /* mark the isolation mode for tap_flow_implicit_create() */
+ pmd->flow_isolate = set;
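/*
 * With 'set' collapsed to 0 or 1, the XOR above acts as a logical
 * comparison, e.g. (hypothetical call sequence):
 *
 *	tap_flow_isolate(dev, 5, NULL);	// set -> 1, rules installed
 *	tap_flow_isolate(dev, 1, NULL);	// 1 ^ 1 == 0, early no-op
 *	tap_flow_isolate(dev, 0, NULL);	// 0 ^ 1 == 1, rules removed
 */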
/*
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
- if (!pmd->rxq[0].fd)
+ if (!process_private->rxq_fds[0])
return 0;
if (set) {
- struct rte_flow *flow;
+ struct rte_flow *remote_flow;
while (1) {
- flow = LIST_FIRST(&pmd->implicit_flows);
- if (!flow)
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (!remote_flow)
break;
/*
* Remove all implicit rules on the remote.
* Keep the local rule to redirect packets on TX.
* Keep also the last implicit local rule: ISOLATE.
*/
- if (flow->msg.t.tcm_ifindex == pmd->if_index)
+ if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
break;
- if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
+ if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
goto error;
}
/* Switch the TC rule according to pmd->flow_isolate */
}
};
- remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
goto fail;
}
err = tap_nl_recv_ack(pmd->nlsk_fd);
if (err < 0) {
- /* Silently ignore re-entering remote promiscuous rule */
- if (errno == EEXIST && idx == TAP_REMOTE_PROMISC)
+ /* Silently ignore re-entering existing rule */
+ if (errno == EEXIST)
goto success;
TAP_LOG(ERR,
"Kernel refused TC filter rule creation (%d): %s",
errno, strerror(errno));
#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
#define SEC_NAME_CLS_Q "cls_q"
-const char *sec_name[SEC_MAX] = {
+static const char *sec_name[SEC_MAX] = {
[SEC_L3_L4] = "l3_l4",
};
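/*
 * Marking sec_name 'static' confines the table to this compilation
 * unit; as a non-static global it was exported from the driver and
 * could collide with an identically named symbol elsewhere at link
 * time.
 */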
return -ENOTSUP;
}
- rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ rss_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!rss_flow) {
TAP_LOG(ERR,
"Cannot allocate memory for rte_flow");