git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net/i40e: fix address of first segment
[dpdk.git]
/
drivers
/
net
/
i40e
/
i40e_flow.c
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 8b2e297..e902a35 100644 (file)
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -2035,8 +2035,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
}
filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
-	if (filter->ether_type == RTE_ETHER_TYPE_IPv4 ||
-	    filter->ether_type == RTE_ETHER_TYPE_IPv6 ||
+	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+	    filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
filter->ether_type == RTE_ETHER_TYPE_LLDP ||
filter->ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
filter->ether_type == RTE_ETHER_TYPE_LLDP ||
filter->ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
@@ -2442,6 +2442,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
uint64_t input_set = I40E_INSET_NONE;
uint16_t frag_off;
enum rte_flow_item_type item_type;
uint64_t input_set = I40E_INSET_NONE;
uint16_t frag_off;
enum rte_flow_item_type item_type;
+ enum rte_flow_item_type next_type;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
uint32_t i, j;
enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
uint32_t i, j;
@@ -2482,6 +2483,16 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = item->spec;
eth_mask = item->mask;
case RTE_FLOW_ITEM_TYPE_ETH:
eth_spec = item->spec;
eth_mask = item->mask;
+ next_type = (item + 1)->type;
+
+ if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+ (!eth_spec || !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL eth spec/mask.");
+ return -rte_errno;
+ }
if (eth_spec && eth_mask) {
if (!rte_is_zero_ether_addr(ð_mask->src) ||
if (eth_spec && eth_mask) {
if (!rte_is_zero_ether_addr(ð_mask->src) ||
@@ -2494,8 +2505,6 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
}
}
if (eth_spec && eth_mask && eth_mask->type) {
}
}
if (eth_spec && eth_mask && eth_mask->type) {
- enum rte_flow_item_type next = (item + 1)->type;
-
if (eth_mask->type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
if (eth_mask->type != RTE_BE16(0xffff)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2506,9 +2515,9 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
ether_type = rte_be_to_cpu_16(eth_spec->type);
ether_type = rte_be_to_cpu_16(eth_spec->type);
-			if (next == RTE_FLOW_ITEM_TYPE_VLAN ||
-			    ether_type == RTE_ETHER_TYPE_IPv4 ||
-			    ether_type == RTE_ETHER_TYPE_IPv6 ||
+			if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
+			    ether_type == RTE_ETHER_TYPE_IPV4 ||
+			    ether_type == RTE_ETHER_TYPE_IPV6 ||
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
@@ -2552,8 +2561,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
ether_type =
rte_be_to_cpu_16(vlan_spec->inner_type);
ether_type =
rte_be_to_cpu_16(vlan_spec->inner_type);
-				if (ether_type == RTE_ETHER_TYPE_IPv4 ||
-				    ether_type == RTE_ETHER_TYPE_IPv6 ||
+				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+				    ether_type == RTE_ETHER_TYPE_IPV6 ||
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
ether_type == RTE_ETHER_TYPE_ARP ||
ether_type == outer_tpid) {
rte_flow_error_set(error, EINVAL,
@@ -3052,7 +3061,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
const struct rte_flow_action *act;
const struct rte_flow_action_queue *act_q;
-	const struct rte_flow_action_mark *mark_spec;
+	const struct rte_flow_action_mark *mark_spec = NULL;
uint32_t index = 0;
/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
uint32_t index = 0;
/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
@@ -3096,7 +3105,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
NEXT_ITEM_OF_ACTION(act, actions, index);
switch (act->type) {
case RTE_FLOW_ACTION_TYPE_MARK:
-			if (!mark_spec) {
+			if (mark_spec) {
/* Double MARK actions requested */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
/* Double MARK actions requested */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
@@ -3108,7 +3117,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
filter->soft_id = mark_spec->id;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
filter->soft_id = mark_spec->id;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
-			if (!mark_spec) {
+			if (mark_spec) {
/* MARK + FLAG not supported */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
/* MARK + FLAG not supported */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, act,
@@ -3175,8 +3184,8 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
cons_filter_type = RTE_ETH_FILTER_FDIR;
cons_filter_type = RTE_ETH_FILTER_FDIR;
-	if (dev->data->dev_conf.fdir_conf.mode !=
-			RTE_FDIR_MODE_PERFECT) {
+	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT ||
+	    pf->fdir.fdir_vsi == NULL) {
/* Enable fdir when fdir flow is added at first time. */
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
/* Enable fdir when fdir flow is added at first time. */
ret = i40e_fdir_setup(pf);
if (ret != I40E_SUCCESS) {
@@ -4762,7 +4771,7 @@ i40e_flow_destroy(struct rte_eth_dev *dev,
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
/* If the last flow is destroyed, disable fdir. */
&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
/* If the last flow is destroyed, disable fdir. */
-		if (!ret && !TAILQ_EMPTY(&pf->fdir.fdir_list)) {
+		if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
i40e_fdir_teardown(pf);
dev->data->dev_conf.fdir_conf.mode =
RTE_FDIR_MODE_NONE;
i40e_fdir_teardown(pf);
dev->data->dev_conf.fdir_conf.mode =
RTE_FDIR_MODE_NONE;