.items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
RTE_FLOW_ITEM_TYPE_IPV6),
.mask = &(const struct rte_flow_item_vlan){
},
.mask_sz = sizeof(struct rte_flow_item_vlan),
.default_mask = &rte_flow_item_vlan_mask,
},
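/*
 * Illustration only, not driver code: the table entry above is what allows
 * a VLAN item to be followed by IPv4/IPv6 in a pattern, with
 * rte_flow_item_vlan_mask as the default match mask. A minimal caller-side
 * sketch (hypothetical helper, port setup assumed done) of a rule this
 * entry would accept:
 */
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
validate_vlan_ipv4_rule(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match VLAN ID 42 using the library's default VLAN mask. */
	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(42) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec,
		  .mask = &rte_flow_item_vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error flow_err;

	/* The PMD walks its items[] graph to check VLAN -> IPV4 is legal. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &flow_err);
}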
- if (!is_zero_ether_addr(&mask->dst)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
+ RTE_ETHER_ADDR_LEN,
- if (!is_zero_ether_addr(&mask->src)) {
- tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
- &spec->src.addr_bytes);
+ if (!rte_is_zero_ether_addr(&mask->src)) {
+ tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC,
+ RTE_ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
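/*
 * For context, a sketch (not verbatim) of the resulting post-rename block:
 * the check is on the mask, not the spec, because an all-zero mask means
 * "do not match this field" regardless of the spec value. When set, the
 * mask itself is emitted through the companion *_MASK flower attribute:
 */
if (!rte_is_zero_ether_addr(&mask->dst)) {
	tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST,
		       RTE_ETHER_ADDR_LEN, &spec->dst.addr_bytes);
	tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST_MASK,
		       RTE_ETHER_ADDR_LEN, &mask->dst.addr_bytes);
}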
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
spec->hdr.dst_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
mask->hdr.dst_addr);
}
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
		 spec->hdr.src_addr);
tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
		 mask->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
}
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
	       sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
	       sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
if (attr->group > MAX_GROUP) {
	rte_flow_error_set(
		error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
		NULL, "group value too big: cannot exceed MAX_GROUP");
	return -rte_errno;
}
if (!flow) {
	rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
			   NULL, "cannot allocate memory for rte_flow");
	goto fail;
}
	errno, strerror(errno));
rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
		   NULL,
		   "overlapping rules or Kernel too old for flower support");
goto fail;
if (!remote_flow) {
	rte_flow_error_set(
		error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
		"cannot allocate memory for rte_flow");
	goto fail;
}
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
errno, strerror(errno));
rte_flow_error_set(
error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
/*
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
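/*
 * Hypothetical shape of that deferral (field and rule-index names assumed,
 * not verbatim): only program the kernel once a tap fd exists; otherwise
 * tun_alloc() applies the same implicit rules at bring-up.
 */
if (pmd->rxq[0].fd != -1)
	tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC);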
break;
/*
* Remove all implicit rules on the remote.
* Keep the local rule to redirect packets on TX.
* Keep also the last implicit local rule: ISOLATE.
*/
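/*
 * Hypothetical sketch of that cleanup (enum layout assumed, with
 * TAP_FLOW_NUM as the implicit-rule count): destroy every implicit rule
 * except the local TX redirect and ISOLATE.
 */
int idx;

for (idx = 0; idx < TAP_FLOW_NUM; idx++) {
	if (idx == TAP_REMOTE_TX || idx == TAP_ISOLATE)
		continue;	/* keep local TX redirect and ISOLATE */
	tap_flow_implicit_destroy(pmd, idx);
}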
tap_flow_set_handle(remote_flow);
if (priv_flow_process(pmd, attr, items, actions, NULL,
remote_flow, implicit_rte_flows[idx].mirred)) {
errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
if (pmd->bpf_fd[i] < 0) {
if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "a nonzero RSS encapsulation level is not supported");
/* Get a new map key for a new RSS rule */
err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
flow->key_idx, errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
flow->bpf_fd[SEC_L3_L4] =
tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
if (flow->bpf_fd[SEC_L3_L4] < 0) {
sec_name[SEC_L3_L4], errno, strerror(errno));
rte_flow_error_set(
error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
* Pointer to operation-specific structure.
*
* @return
* 0 on success, negative errno value on failure.
*/
int
-tap_dev_filter_ctrl(struct rte_eth_dev *dev,
- enum rte_filter_type filter_type,
- enum rte_filter_op filter_op,
- void *arg)
+tap_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+ const struct rte_flow_ops **ops)
- switch (filter_type) {
- case RTE_ETH_FILTER_GENERIC:
- if (filter_op != RTE_ETH_FILTER_GET)
- return -EINVAL;
- *(const void **)arg = &tap_flow_ops;
- return 0;
- default:
- TAP_LOG(ERR, "%p: filter type (%d) not supported",
- (void *)dev, filter_type);
- }
- return -EINVAL;
+ *ops = &tap_flow_ops;
+ return 0;
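/*
 * Illustration only (sketch): with the filter_ctrl callback removed, the
 * driver hooks its flow ops into the dev_ops table, and the ethdev flow
 * layer resolves them through rte_flow_ops_get() whenever an application
 * calls the rte_flow_* API. Field placement is abridged here:
 */
static const struct eth_dev_ops ops = {
	/* ... other callbacks elided ... */
	.flow_ops_get = tap_dev_flow_ops_get,
};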