* Bit-mask of the fields supported by the NIC, to compare with the user mask.
* @param[in] size
* Bit-masks size in bytes.
+ * @param[in] range_accepted
+ * True if a range of values is accepted for specific fields, false otherwise.
* @param[out] error
* Pointer to error structure.
*
const uint8_t *mask,
const uint8_t *nic_mask,
unsigned int size,
+ bool range_accepted,
struct rte_flow_error *error)
{
unsigned int i;
RTE_FLOW_ERROR_TYPE_ITEM, item,
"mask/last without a spec is not"
" supported");
- if (item->spec && item->last) {
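+ /*
+  * Callers pass MLX5_ITEM_RANGE_NOT_ACCEPTED when spec/last ranges
+  * must be rejected; when a range is accepted, the spec/last
+  * consistency check below is skipped.
+  */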
+ if (item->spec && item->last && !range_accepted) {
uint8_t spec[size];
uint8_t last[size];
unsigned int i;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_icmp6_mask,
- sizeof(struct rte_flow_item_icmp6), error);
+ sizeof(struct rte_flow_item_icmp6),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_icmp), error);
+ sizeof(struct rte_flow_item_icmp),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_eth),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
return ret;
}
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_vlan),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
* @param[in] acc_mask
* Acceptable mask, if NULL the default internal mask
* will be used to check whether item fields are supported.
+ * @param[in] range_accepted
+ * True if a range of values is accepted for specific fields, false otherwise.
* @param[out] error
* Pointer to error structure.
*
uint64_t last_item,
uint16_t ether_type,
const struct rte_flow_item_ipv4 *acc_mask,
+ bool range_accepted,
struct rte_flow_error *error)
{
const struct rte_flow_item_ipv4 *mask = item->mask;
acc_mask ? (const uint8_t *)acc_mask
: (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv4),
- error);
+ range_accepted, error);
if (ret < 0)
return ret;
return 0;
RTE_FLOW_ERROR_TYPE_ITEM, item,
"IPv6 cannot follow L2/VLAN layer "
"which ether type is not IPv6");
+ if (mask && mask->hdr.proto == UINT8_MAX && spec)
+ next_proto = spec->hdr.proto;
if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
- if (mask && spec)
- next_proto = mask->hdr.proto & spec->hdr.proto;
if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
"multiple tunnel "
"not supported");
}
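+ /* The proto field must not select an IPv6 extension header. */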
+ if (next_proto == IPPROTO_HOPOPTS ||
+ next_proto == IPPROTO_ROUTING ||
+ next_proto == IPPROTO_FRAGMENT ||
+ next_proto == IPPROTO_ESP ||
+ next_proto == IPPROTO_AH ||
+ next_proto == IPPROTO_DSTOPTS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IPv6 proto (next header) should "
+ "not be set as extension header");
if (item_flags & MLX5_FLOW_LAYER_IPIP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
acc_mask ? (const uint8_t *)acc_mask
: (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ipv6),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_udp_mask,
- sizeof(struct rte_flow_item_udp), error);
+ sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)flow_mask,
- sizeof(struct rte_flow_item_tcp), error);
+ sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
if (ret < 0)
return ret;
return 0;
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_vxlan_mask,
sizeof(struct rte_flow_item_vxlan),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
if (spec) {
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
sizeof(struct rte_flow_item_vxlan_gpe),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
if (spec) {
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&gre_key_default_mask,
- sizeof(rte_be32_t), error);
+ sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
return ret;
}
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_gre), error);
+ sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
if (ret < 0)
return ret;
#ifndef HAVE_MLX5DV_DR
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
- sizeof(struct rte_flow_item_geneve), error);
+ sizeof(struct rte_flow_item_geneve),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret)
return ret;
if (spec) {
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_mpls_mask,
- sizeof(struct rte_flow_item_mpls), error);
+ sizeof(struct rte_flow_item_mpls),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
(const uint8_t *)&rte_flow_item_nvgre_mask,
- sizeof(struct rte_flow_item_nvgre), error);
+ sizeof(struct rte_flow_item_nvgre),
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
return 0;
acc_mask ? (const uint8_t *)acc_mask
: (const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_ecpri),
- error);
+ MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
}
/* Allocate unique ID for the split Q/RSS subflows. */
offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
/*
* Identify the counters released between query trigger and query
- * handle more effiecntly. The counter released in this gap period
+ * handle more efficiently. The counter released in this gap period
* should wait for a new round of query as the new arrived packets
* will not be taken into account.
*/
struct mlx5_age_param *age_param;
struct mlx5_counter_stats_raw *cur = pool->raw_hw;
struct mlx5_counter_stats_raw *prev = pool->raw;
- uint16_t curr = rte_rdtsc() / (rte_get_tsc_hz() / 10);
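+ /*
+  * Age in seconds: accumulate the time elapsed since the last
+  * aging check instead of comparing wrapping TSC-based timestamps.
+  */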
+ const uint64_t curr_time = MLX5_CURR_TIME_SEC;
+ const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
+ uint16_t expected = AGE_CANDIDATE;
uint32_t i;
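+ /* Remember this check's time to compute the next time delta. */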
+ pool->time_of_last_age_check = curr_time;
for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
cnt = MLX5_POOL_GET_CNT(pool, i);
age_param = MLX5_CNT_TO_AGE(cnt);
- if (rte_atomic16_read(&age_param->state) != AGE_CANDIDATE)
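+ /* Skip counters that are not aging candidates. */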
+ if (__atomic_load_n(&age_param->state,
+ __ATOMIC_RELAXED) != AGE_CANDIDATE)
continue;
if (cur->data[i].hits != prev->data[i].hits) {
- age_param->expire = curr + age_param->timeout;
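+ /* Traffic was seen since the last check; reset the idle time. */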
+ __atomic_store_n(&age_param->sec_since_last_hit, 0,
+ __ATOMIC_RELAXED);
continue;
}
- if ((uint16_t)(curr - age_param->expire) >= (UINT16_MAX / 2))
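+ /* Add the elapsed idle time; age out only past the timeout. */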
+ if (__atomic_add_fetch(&age_param->sec_since_last_hit,
+ time_delta,
+ __ATOMIC_RELAXED) <= age_param->timeout)
continue;
/**
* Hold the lock first, or if between the
priv = rte_eth_devices[age_param->port_id].data->dev_private;
age_info = GET_PORT_AGE_INFO(priv);
rte_spinlock_lock(&age_info->aged_sl);
- /* If the cpmset fails, release happens. */
- if (rte_atomic16_cmpset((volatile uint16_t *)
- &age_param->state,
- AGE_CANDIDATE,
- AGE_TMOUT) ==
- AGE_CANDIDATE) {
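+ /* If the CAS fails, a release happened concurrently. */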
+ if (__atomic_compare_exchange_n(&age_param->state, &expected,
+ AGE_TMOUT, false,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED)) {
TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
}