git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net: replace IPv4/v6 constants with uppercase name
[dpdk.git]
/
drivers
/
net
/
cxgbe
/
cxgbe_flow.c
diff --git
a/drivers/net/cxgbe/cxgbe_flow.c
b/drivers/net/cxgbe/cxgbe_flow.c
index
a8f076e
..
d3de689
100644
(file)
--- a/
drivers/net/cxgbe/cxgbe_flow.c
+++ b/
drivers/net/cxgbe/cxgbe_flow.c
@@
-2,7
+2,7
@@
* Copyright(c) 2018 Chelsio Communications.
* All rights reserved.
*/
* Copyright(c) 2018 Chelsio Communications.
* All rights reserved.
*/
-#include "common.h"
+#include "base/common.h"
#include "cxgbe_flow.h"
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
#include "cxgbe_flow.h"
#define __CXGBE_FILL_FS(__v, __m, fs, elem, e) \
@@
-115,12
+115,12
@@
ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
/* we don't support SRC_MAC filtering*/
mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
/* we don't support SRC_MAC filtering*/
- if (!is_zero_ether_addr(&mask->src))
+ if (!rte_is_zero_ether_addr(&mask->src))
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
item,
"src mac filtering not supported");
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
item,
"src mac filtering not supported");
- if (!is_zero_ether_addr(&mask->dst)) {
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
struct rte_flow *flow = (struct rte_flow *)fs->private;
const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
struct rte_flow *flow = (struct rte_flow *)fs->private;
@@
-233,7
+233,7
@@
ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
item, "ttl/tos are not supported");
fs->type = FILTER_TYPE_IPV4;
item, "ttl/tos are not supported");
fs->type = FILTER_TYPE_IPV4;
- CXGBE_FILL_FS(ETHER_TYPE_IPv4, 0xffff, ethtype);
+ CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV4, 0xffff, ethtype);
if (!val)
return 0; /* ipv4 wild card */
if (!val)
return 0; /* ipv4 wild card */
@@
-262,7
+262,7
@@
ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
"tc/flow/hop are not supported");
fs->type = FILTER_TYPE_IPV6;
"tc/flow/hop are not supported");
fs->type = FILTER_TYPE_IPV6;
- CXGBE_FILL_FS(ETHER_TYPE_IPv6, 0xffff, ethtype);
+ CXGBE_FILL_FS(RTE_ETHER_TYPE_IPV6, 0xffff, ethtype);
if (!val)
return 0; /* ipv6 wild card */
if (!val)
return 0; /* ipv6 wild card */
@@
-448,7
+448,7
@@
ch_rte_parse_atype_switch(const struct rte_flow_action *a,
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
pushvlan = (const struct rte_flow_action_of_push_vlan *)
a->conf;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
pushvlan = (const struct rte_flow_action_of_push_vlan *)
a->conf;
- if (pushvlan->ethertype != ETHER_TYPE_VLAN)
+ if (pushvlan->ethertype != RTE_ETHER_TYPE_VLAN)
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"only ethertype 0x8100 "
return rte_flow_error_set(e, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, a,
"only ethertype 0x8100 "
@@
-732,6
+732,10
@@
cxgbe_rtef_parse_items(struct rte_flow *flow,
"parse items cannot be repeated (except void)");
repeat[i->type] = 1;
"parse items cannot be repeated (except void)");
repeat[i->type] = 1;
+ /* No spec found for this pattern item. Skip it */
+ if (!i->spec)
+ break;
+
/* validate the item */
ret = cxgbe_validate_item(i, e);
if (ret)
/* validate the item */
ret = cxgbe_validate_item(i, e);
if (ret)
@@
-797,7
+801,7
@@
static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
- CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_MS,
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
@@
-883,7
+887,7
@@
static int __cxgbe_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
/* Poll the FW for reply */
err = cxgbe_poll_for_completion(&adap->sge.fw_evtq,
- CXGBE_FLOW_POLL_US,
+ CXGBE_FLOW_POLL_MS,
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
CXGBE_FLOW_POLL_CNT,
&ctx.completion);
if (err) {
@@
-943,6
+947,7
@@
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
const struct rte_flow_action *action, void *data,
struct rte_flow_error *e)
{
const struct rte_flow_action *action, void *data,
struct rte_flow_error *e)
{
+ struct adapter *adap = ethdev2adap(flow->dev);
struct ch_filter_specification fs;
struct rte_flow_query_count *c;
struct filter_entry *f;
struct ch_filter_specification fs;
struct rte_flow_query_count *c;
struct filter_entry *f;
@@
-981,6
+986,8
@@
cxgbe_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
/* Query was successful */
c->bytes_set = 1;
c->hits_set = 1;
/* Query was successful */
c->bytes_set = 1;
c->hits_set = 1;
+ if (c->reset)
+ cxgbe_clear_filter_count(adap, flow->fidx, f->fs.cap, true);
return 0; /* success / partial_success */
}
return 0; /* success / partial_success */
}