#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
struct acl_rule {
enum ice_fltr_ptype flow_type;
+ /* Widened to 64-bit: each entry now stores the full entry ID,
+ * encoded as ((uint64_t)flow_type << 32) | slot_id, instead of
+ * the bare 32-bit slot index.
+ */
- uint32_t entry_id[4];
+ uint64_t entry_id[4];
};
static struct
ice_pattern_match_item ice_acl_pattern[] = {
+ /* NOTE(review): a fourth initializer field was added to every row —
+ * presumably the inner (tunnel) input-set mask, paired with the outer
+ * mask renamed input_set_mask_o elsewhere in this patch; confirm
+ * against the struct ice_pattern_match_item definition.
+ */
- {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
- {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
- {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
- {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
+ {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
+ {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
};
static int
/* For IPV4_OTHER type, should add entry for all types.
* For IPV4_UDP/TCP/SCTP type, only add entry for each.
*/
- if (slot_id < MAX_ACL_ENTRIES) {
+ if (slot_id < MAX_ACL_NORMAL_ENTRIES) {
entry_id = ((uint64_t)flow_type << 32) | slot_id;
ret = ice_flow_add_entry(hw, blk, flow_type,
entry_id, pf->main_vsi->idx,
PMD_DRV_LOG(ERR, "Fail to add entry.");
return ret;
}
- rule->entry_id[entry_idx] = slot_id;
+ rule->entry_id[entry_idx] = entry_id;
pf->acl.hw_entry_id[slot_id] = hw_entry;
} else {
PMD_DRV_LOG(ERR, "Exceed the maximum entry number(%d)"
- " HW supported!", MAX_ACL_ENTRIES);
+ " HW supported!", MAX_ACL_NORMAL_ENTRIES);
return -1;
}
return 0;
}
+/* Remove one ACL flow entry from HW, identified by its 64-bit entry ID
+ * ((flow_type << 32) | slot_id). The HW entry handle is looked up via
+ * ice_flow_find_entry() rather than read from pf->acl.hw_entry_id[].
+ * NOTE(review): the return value of ice_flow_rem_entry() is discarded —
+ * presumably removal failure is treated as non-fatal here; confirm.
+ */
+static inline void
+ice_acl_del_entry(struct ice_hw *hw, uint64_t entry_id)
+{
+ uint64_t hw_entry;
+
+ hw_entry = ice_flow_find_entry(hw, ICE_BLK_ACL, entry_id);
+ ice_flow_rem_entry(hw, ICE_BLK_ACL, hw_entry);
+}
+
+/* Roll back the first @entry_idx HW entries of @rule: mark each slot free
+ * again in the pf->acl.slots bitmap and delete the entry from HW.
+ */
static inline void
ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
{
uint32_t slot_id;
int32_t i;
+ uint64_t entry_id;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
for (i = 0; i < entry_idx; i++) {
- slot_id = rule->entry_id[i];
+ /* rule->entry_id[] now holds full IDs; the low dword is the slot. */
+ entry_id = rule->entry_id[i];
+ slot_id = ICE_LO_DWORD(entry_id);
rte_bitmap_set(pf->acl.slots, slot_id);
- ice_flow_rem_entry(hw, ICE_BLK_ACL,
- pf->acl.hw_entry_id[slot_id]);
+ ice_acl_del_entry(hw, entry_id);
}
}
{
struct acl_rule *rule = (struct acl_rule *)flow->rule;
uint32_t slot_id, i;
+ uint64_t entry_id;
struct ice_pf *pf = &ad->pf;
struct ice_hw *hw = ICE_PF_TO_HW(pf);
int ret = 0;
switch (rule->flow_type) {
case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
for (i = 0; i < 4; i++) {
- slot_id = rule->entry_id[i];
+ entry_id = rule->entry_id[i];
+ slot_id = ICE_LO_DWORD(entry_id);
rte_bitmap_set(pf->acl.slots, slot_id);
- ice_flow_rem_entry(hw, ICE_BLK_ACL,
- pf->acl.hw_entry_id[slot_id]);
+ ice_acl_del_entry(hw, entry_id);
}
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
- slot_id = rule->entry_id[0];
+ entry_id = rule->entry_id[0];
+ slot_id = ICE_LO_DWORD(entry_id);
rte_bitmap_set(pf->acl.slots, slot_id);
- ice_flow_rem_entry(hw, ICE_BLK_ACL,
- pf->acl.hw_entry_id[slot_id]);
+ ice_acl_del_entry(hw, entry_id);
break;
default:
rte_flow_error_set(error, EINVAL,
eth_mask = item->mask;
if (eth_spec && eth_mask) {
+ if (rte_is_broadcast_ether_addr(ð_mask->src) ||
+ rte_is_broadcast_ether_addr(ð_mask->dst)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid mac addr mask");
+ return -rte_errno;
+ }
+
if (!rte_is_zero_ether_addr(ð_spec->src) &&
!rte_is_zero_ether_addr(ð_mask->src)) {
input_set |= ICE_INSET_SMAC;
return -rte_errno;
}
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX ||
+ ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
if (ipv4_mask->hdr.src_addr) {
filter->input.ip.v4.src_ip =
ipv4_spec->hdr.src_addr;
return -rte_errno;
}
+ if (tcp_mask->hdr.src_port == UINT16_MAX ||
+ tcp_mask->hdr.dst_port == UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
tcp_mask->hdr.src_port) {
input_set |= ICE_INSET_TCP_SRC_PORT;
return -rte_errno;
}
+ if (udp_mask->hdr.src_port == UINT16_MAX ||
+ udp_mask->hdr.dst_port == UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
udp_mask->hdr.src_port) {
input_set |= ICE_INSET_UDP_SRC_PORT;
flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
if (sctp_spec && sctp_mask) {
+ if (sctp_mask->hdr.src_port == UINT16_MAX ||
+ sctp_mask->hdr.dst_port == UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid SCTP mask");
+ return -rte_errno;
+ }
+
if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
sctp_mask->hdr.src_port) {
input_set |= ICE_INSET_SCTP_SRC_PORT;
uint32_t array_len,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
+ uint32_t priority,
void **meta,
struct rte_flow_error *error)
{
uint64_t input_set;
int ret;
+ if (priority >= 1)
+ return -rte_errno;
+
memset(filter, 0, sizeof(*filter));
- item = ice_search_pattern_match_item(pattern, array, array_len, error);
+ item = ice_search_pattern_match_item(ad, pattern, array, array_len,
+ error);
if (!item)
return -rte_errno;
if (ret)
goto error;
input_set = filter->input_set;
- if (!input_set || input_set & ~item->input_set_mask) {
+ if (!input_set || input_set & ~item->input_set_mask_o) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
pattern,