if (!flow)
return 0;
msg = &flow->msg;
- if (!is_zero_ether_addr(&spec->dst)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
+ RTE_ETHER_ADDR_LEN,
&spec->dst.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_DST_MASK, RTE_ETHER_ADDR_LEN,
&mask->dst.addr_bytes);
}
- if (!is_zero_ether_addr(&mask->src)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
- &spec->src.addr_bytes);
+ if (!rte_is_zero_ether_addr(&mask->src)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
+ RTE_ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
tap_nlattr_add(&msg->nh,
- TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, RTE_ETHER_ADDR_LEN,
&mask->src.addr_bytes);
}
return 0;
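The checks in this hunk now key off the item's mask instead of its spec: a TCA_FLOWER attribute pair is only emitted when the application actually constrains that field. As a hedged illustration of the application side (the MAC value and variable names are made up; the dst/src members of struct rte_flow_item_eth are the classic layout):

#include <rte_flow.h>

/* Match only the destination MAC; leave the source unconstrained. */
static const struct rte_flow_item_eth eth_spec = {
	.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static const struct rte_flow_item_eth eth_mask = {
	.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	/* src mask stays all-zero, so rte_is_zero_ether_addr(&mask->src)
	 * is true and no TCA_FLOWER_KEY_ETH_SRC attribute is added. */
};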
info->eth_type = htons(ETH_P_IP);
if (!spec)
return 0;
- if (spec->hdr.dst_addr) {
+ if (mask->hdr.dst_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
- if (spec->hdr.src_addr) {
+ if (mask->hdr.src_addr) {
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
spec->hdr.src_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
info->eth_type = htons(ETH_P_IPV6);
if (!spec)
return 0;
- if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
- if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
spec->hdr.src_port);
return 0;
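The UDP (and, below, TCP) port checks drop the spec & mask test for the same reason: spec->hdr.dst_port & mask->hdr.dst_port is zero whenever the masked value happens to be zero, even though the item is a real constraint. A hedged example of a rule the old test would have silently skipped (the port value is chosen only to show the corner case):

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Match UDP datagrams with destination port 0 exactly. */
static const struct rte_flow_item_udp udp_spec = {
	.hdr.dst_port = RTE_BE16(0),
};
static const struct rte_flow_item_udp udp_mask = {
	.hdr.dst_port = RTE_BE16(0xffff),
};

/* Old test: (0 & 0xffff) == 0, so TCA_FLOWER_KEY_UDP_DST was never added.
 * New test: mask->hdr.dst_port != 0, so the attribute is emitted. */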
tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
if (!spec)
return 0;
- if (spec->hdr.dst_port & mask->hdr.dst_port)
+ if (mask->hdr.dst_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
spec->hdr.dst_port);
- if (spec->hdr.src_port & mask->hdr.src_port)
+ if (mask->hdr.src_port)
tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
spec->hdr.src_port);
return 0;
static void
tap_flow_set_handle(struct rte_flow *flow)
{
+ union {
+ struct rte_flow *flow;
+ const void *key;
+ } tmp;
uint32_t handle = 0;
+ tmp.flow = flow;
+
if (sizeof(flow) > 4)
- handle = rte_jhash(&flow, sizeof(flow), 1);
+ handle = rte_jhash(tmp.key, sizeof(flow), 1);
else
handle = (uintptr_t)flow;
/* must be at least 1 to avoid letting the kernel choose one for us */
NULL, "priority value too big");
goto fail;
}
- flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "cannot allocate memory for rte_flow");
* to the local pmd->if_index.
*/
if (pmd->remote_if_index) {
- remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
struct rte_flow_error *error __rte_unused)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
/* normalize 'set' variable to contain 0 or 1 values */
if (set)
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
- if (!pmd->rxq[0].fd)
+ if (!process_private->rxq_fds[0])
return 0;
if (set) {
struct rte_flow *remote_flow;
}
};
- remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ remote_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!remote_flow) {
TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
goto fail;
#define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
#define SEC_NAME_CLS_Q "cls_q"
-const char *sec_name[SEC_MAX] = {
+static const char *sec_name[SEC_MAX] = {
[SEC_L3_L4] = "l3_l4",
};
return -ENOTSUP;
}
- rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ rss_flow = rte_zmalloc(__func__, sizeof(struct rte_flow), 0);
if (!rss_flow) {
TAP_LOG(ERR,
"Cannot allocate memory for rte_flow");
}
/**
- * Manage filter operations.
+ * Get rte_flow operations.
*
* @param dev
* Pointer to Ethernet device structure.
- * @param filter_type
- * Filter type.
- * @param filter_op
- * Operation to perform.
- * @param arg
+ * @param ops
* Pointer to operation-specific structure.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
-tap_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+tap_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
{
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &tap_flow_ops;
- return 0;
- default:
- TAP_LOG(ERR, "%p: filter type (%d) not supported",
- dev, filter_type);
- }
- return -EINVAL;
+ *ops = &tap_flow_ops;
+ return 0;
}
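For context, a hedged sketch of how a hook like tap_dev_flow_ops_get() is reached: the driver registers it in its eth_dev_ops table and the generic ethdev layer resolves the rte_flow ops through it. Apart from tap_dev_flow_ops_get and tap_flow_ops, the names below follow the usual flow_ops_get convention and should be read as assumptions, not as part of this patch:

/* In the driver's ops table (normally in the ethdev init code): */
static const struct eth_dev_ops ops = {
	/* ... other callbacks ... */
	.flow_ops_get = tap_dev_flow_ops_get,
};

/* The generic layer then obtains the driver's flow ops roughly like: */
static const struct rte_flow_ops *
resolve_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *flow_ops = NULL;

	if (dev->dev_ops->flow_ops_get(dev, &flow_ops) != 0)
		return NULL;
	return flow_ops;	/* &tap_flow_ops for this driver */
}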