case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
/* fill the common ip header */
- arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv4;
+ arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
arfs->tuple.ip_proto = next_proto[input->flow_type];
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
- arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPv6;
+ arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
arfs->tuple.ip_proto = next_proto[input->flow_type];
rte_memcpy(arfs->tuple.dst_ipv6,
&input->flow.ipv6_flow.dst_ip,
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_ntuple_filter_params params;
char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
struct qede_arfs_entry *tmp = NULL;
const struct rte_memzone *mz;
ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
&qdev->arfs_info.arfs);
}
+
+ memset(&params, 0, sizeof(params));
+ params.addr = (dma_addr_t)mz->iova;
+ params.length = pkt_len;
+ params.qid = arfs->rx_queue;
+ params.vport_id = 0;
+ params.b_is_add = add;
+ params.b_is_drop = arfs->is_drop;
+
/* configure filter with ECORE_SPQ_MODE_EBLOCK */
rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
- (dma_addr_t)mz->iova,
- pkt_len,
- arfs->rx_queue,
- 0, add);
+ &params);
if (rc == ECORE_SUCCESS) {
if (add) {
arfs->pkt_len = pkt_len;
return -EINVAL;
}
- if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
DP_ERR(edev, "invalid queue number %u\n",
fdir->action.rx_queue);
return -EINVAL;
*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
switch (arfs->tuple.eth_proto) {
- case RTE_ETHER_TYPE_IPv4:
+ case RTE_ETHER_TYPE_IPV4:
ip = (struct rte_ipv4_hdr *)raw_pkt;
ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
ip->total_length = sizeof(struct rte_ipv4_hdr);
params->tcp = true;
}
break;
- case RTE_ETHER_TYPE_IPv6:
+ case RTE_ETHER_TYPE_IPV6:
ip6 = (struct rte_ipv6_hdr *)raw_pkt;
ip6->proto = arfs->tuple.ip_proto;
ip6->vtc_flow =
static int
_qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
const struct rte_eth_tunnel_filter_conf *conf,
- __attribute__((unused)) enum rte_filter_op filter_op,
+ __rte_unused enum rte_filter_op filter_op,
enum ecore_tunn_clss *clss,
bool add)
{
}
static int
-qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
+qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
}
static int
-qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
const struct rte_flow_item pattern[],
struct rte_flow_error *error,
struct rte_flow *flow)
flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
flow->entry.tuple.eth_proto =
- RTE_ETHER_TYPE_IPv4;
+ RTE_ETHER_TYPE_IPV4;
}
break;
spec->hdr.dst_addr,
IPV6_ADDR_LEN);
flow->entry.tuple.eth_proto =
- RTE_ETHER_TYPE_IPv6;
+ RTE_ETHER_TYPE_IPV6;
}
break;
struct rte_flow_error *error,
struct rte_flow *flow)
{
- struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
const struct rte_flow_action_queue *queue;
if (actions == NULL) {
case RTE_FLOW_ACTION_TYPE_QUEUE:
queue = actions->conf;
- if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+ if (queue->index >= QEDE_RSS_COUNT(dev)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
flow->entry.rx_queue = queue->index;
break;
-
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ if (flow)
+ flow->entry.is_drop = true;
+ break;
default:
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions,
- "Action is not supported - only ACTION_TYPE_QUEUE supported");
+ "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
return -rte_errno;
}
}