net/i40e: fix flow RSS configuration error
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index cd9a9b6..69d1ba5 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -11,7 +11,7 @@
 #include <stdarg.h>

 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
 #include <rte_eth_ctrl.h>
@@ -1989,8 +1989,8 @@ i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
 		item_type = item->type;
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_spec = item->spec;
+			eth_mask = item->mask;
 			/* Get the MAC info. */
 			if (!eth_spec || !eth_mask) {
 				rte_flow_error_set(error, EINVAL,
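A note on this hunk and the many similar ones below: `item->spec` and `item->mask` are declared `const void *` in `struct rte_flow_item`, and C converts `void *` to any object-pointer type implicitly, so the removed casts never bought anything. A minimal standalone sketch of the idiom, with simplified stand-in types (not the driver's real structs):

#include <stdio.h>

/* Simplified stand-ins for rte_flow_item / rte_flow_item_eth. */
struct item { const void *spec; };
struct eth { unsigned char dst[6]; };

int main(void)
{
	struct eth e = { .dst = {1, 2, 3, 4, 5, 6} };
	struct item it = { .spec = &e };

	/* No cast needed: const void * converts implicitly to any
	 * pointer-to-const object type in C (unlike C++). */
	const struct eth *spec = it.spec;

	printf("%u\n", spec->dst[0]);
	return 0;
}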
@@ -2075,7 +2075,7 @@ i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
 	}

 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
-		act_q = (const struct rte_flow_action_queue *)act->conf;
+		act_q = act->conf;
 		filter->queue = act_q->index;
 		if (filter->queue >= pf->dev_data->nb_rx_queues) {
 			rte_flow_error_set(error, EINVAL,
@@ -2260,6 +2260,7 @@ i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
 			(raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
 			(layer_idx * I40E_MAX_FLXPLD_FIED);
 		I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+		i40e_global_cfg_warning(I40E_WARNING_ENA_FLX_PLD);
 	}

 	/* Set flex pit */
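`i40e_global_cfg_warning()` is an i40e-internal helper from the same era's multi-driver work; it logs that the PMD touched a device-global register such as `I40E_GLQF_ORT`, which affects every port and every driver instance sharing the NIC, not just this port. A hypothetical miniature of that pattern (all names invented):

#include <stdio.h>

/* Invented stand-ins: tag each global-register write with a warning so
 * operators know device-wide state changed, not just this port's. */
enum global_warning { WARN_ENA_FLX_PLD };

static void global_cfg_warning(enum global_warning w)
{
	fprintf(stderr, "global config %d changed: affects all ports\n", w);
}

static void write_global_reg(unsigned int reg, unsigned int val)
{
	(void)reg; (void)val;	/* real driver: I40E_WRITE_REG(hw, reg, val) */
}

int main(void)
{
	write_global_reg(0 /* pretend GLQF_ORT(33) */, 0);
	global_cfg_warning(WARN_ENA_FLX_PLD);
	return 0;
}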
@@ -2477,8 +2478,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 		item_type = item->type;
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_spec = item->spec;
+			eth_mask = item->mask;
 			if (eth_spec && eth_mask) {
 				if (!is_zero_ether_addr(&eth_mask->src) ||
@@ -2515,10 +2516,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
 				    rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -2534,10 +2533,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
-			ipv4_spec =
-				(const struct rte_flow_item_ipv4 *)item->spec;
-			ipv4_mask =
-				(const struct rte_flow_item_ipv4 *)item->mask;
+			ipv4_spec = item->spec;
+			ipv4_mask = item->mask;
 			pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2602,10 +2599,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
-			ipv6_spec =
-				(const struct rte_flow_item_ipv6 *)item->spec;
-			ipv6_mask =
-				(const struct rte_flow_item_ipv6 *)item->mask;
+			ipv6_spec = item->spec;
+			ipv6_mask = item->mask;
 			pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
 			layer_idx = I40E_FLXPLD_L3_IDX;
@@ -2673,8 +2668,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			outer_ip = false;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
-			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+			tcp_spec = item->spec;
+			tcp_mask = item->mask;
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				pctype =
@@ -2721,8 +2716,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
-			udp_spec = (const struct rte_flow_item_udp *)item->spec;
-			udp_mask = (const struct rte_flow_item_udp *)item->mask;
+			udp_spec = item->spec;
+			udp_mask = item->mask;
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				pctype =
@@ -2774,8 +2769,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				return -rte_errno;
 			}
-			gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
-			gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
 			if (gtp_spec && gtp_mask) {
 				if (gtp_mask->v_pt_rsv_flags ||
@@ -2796,10 +2791,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_SCTP:
-			sctp_spec =
-				(const struct rte_flow_item_sctp *)item->spec;
-			sctp_mask =
-				(const struct rte_flow_item_sctp *)item->mask;
+			sctp_spec = item->spec;
+			sctp_mask = item->mask;
 			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
 				pctype =
@@ -2847,8 +2840,8 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_RAW:
-			raw_spec = (const struct rte_flow_item_raw *)item->spec;
-			raw_mask = (const struct rte_flow_item_raw *)item->mask;
+			raw_spec = item->spec;
+			raw_mask = item->mask;
 			if (!raw_spec || !raw_mask) {
 				rte_flow_error_set(error, EINVAL,
@@ -2858,6 +2851,14 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 				return -rte_errno;
 			}

+			if (pf->support_multi_driver) {
+				rte_flow_error_set(error, ENOTSUP,
+						   RTE_FLOW_ERROR_TYPE_ITEM,
+						   item,
+						   "Unsupported flexible payload.");
+				return -rte_errno;
+			}
+
 			ret = i40e_flow_check_raw_item(item, raw_spec, error);
 			if (ret < 0)
 				return ret;
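The new check refuses flexible-payload (RAW item) rules when `support_multi_driver` is set: programming flexible payload goes through device-global `GLQF` registers (see the `I40E_GLQF_ORT` write in the hunk at 2260 above), which must stay untouched when other drivers share the NIC. Returning `ENOTSUP` lets an application distinguish 'feature unavailable' from a malformed rule; it can probe with `rte_flow_validate()` before creating, roughly like this (a sketch assuming a DPDK 18.02-era environment, with attr/pattern/actions built by the caller):

#include <errno.h>
#include <rte_errno.h>
#include <rte_flow.h>

/* Returns 1 if the rule is accepted, 0 if the feature is unsupported
 * (e.g. flexible payload under multi-driver), negative otherwise. */
static int
probe_rule(uint16_t port_id, const struct rte_flow_attr *attr,
	   const struct rte_flow_item pattern[],
	   const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	int ret = rte_flow_validate(port_id, attr, pattern, actions, &err);

	if (ret == 0)
		return 1;
	if (rte_errno == ENOTSUP)
		return 0;
	return ret;
}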
@@ -2916,7 +2917,7 @@ i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
 			raw_id++;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VF:
-			vf_spec = (const struct rte_flow_item_vf *)item->spec;
+			vf_spec = item->spec;
 			filter->input.flow_ext.is_vf = 1;
 			filter->input.flow_ext.dst_id = vf_spec->id;
 			if (filter->input.flow_ext.is_vf &&
@@ -3008,7 +3009,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_QUEUE:
-		act_q = (const struct rte_flow_action_queue *)act->conf;
+		act_q = act->conf;
 		filter->action.rx_queue = act_q->index;
 		if ((!filter->input.flow_ext.is_vf &&
 		    filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
@@ -3039,7 +3040,7 @@ i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	switch (act->type) {
 	case RTE_FLOW_ACTION_TYPE_MARK:
-		mark_spec = (const struct rte_flow_action_mark *)act->conf;
+		mark_spec = act->conf;
 		filter->action.report_status = I40E_FDIR_REPORT_ID;
 		filter->soft_id = mark_spec->id;
 		break;
@@ -3130,7 +3131,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 	}

 	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
-		act_vf = (const struct rte_flow_action_vf *)act->conf;
+		act_vf = act->conf;
 		filter->vf_id = act_vf->id;
 		filter->is_to_vf = 1;
 		if (filter->vf_id >= pf->vf_num) {
@@ -3145,7 +3146,7 @@ i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
 	index++;
 	NEXT_ITEM_OF_ACTION(act, actions, index);
 	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
-		act_q = (const struct rte_flow_action_queue *)act->conf;
+		act_q = act->conf;
 		filter->queue_id = act_q->index;
 		if ((!filter->is_to_vf) &&
 		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
@@ -3237,8 +3238,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 		item_type = item->type;
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_spec = item->spec;
+			eth_mask = item->mask;
 			/* Check if ETH item is used for place holder.
 			 * If yes, both spec and mask should be NULL.
@@ -3281,10 +3282,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
 			if (!(vlan_spec && vlan_mask)) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3341,10 +3340,8 @@ i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VXLAN:
-			vxlan_spec =
-				(const struct rte_flow_item_vxlan *)item->spec;
-			vxlan_mask =
-				(const struct rte_flow_item_vxlan *)item->mask;
+			vxlan_spec = item->spec;
+			vxlan_mask = item->mask;
 			/* Check if VXLAN item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
 			 * If no, both spec and mask shouldn't be NULL.
@@ -3470,8 +3467,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 		item_type = item->type;
 		switch (item_type) {
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			eth_spec = (const struct rte_flow_item_eth *)item->spec;
-			eth_mask = (const struct rte_flow_item_eth *)item->mask;
+			eth_spec = item->spec;
+			eth_mask = item->mask;
 			/* Check if ETH item is used for place holder.
 			 * If yes, both spec and mask should be NULL.
@@ -3515,10 +3512,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
 			if (!(vlan_spec && vlan_mask)) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -3563,10 +3558,8 @@ i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_NVGRE:
-			nvgre_spec =
-				(const struct rte_flow_item_nvgre *)item->spec;
-			nvgre_mask =
-				(const struct rte_flow_item_nvgre *)item->mask;
+			nvgre_spec = item->spec;
+			nvgre_mask = item->mask;
 			/* Check if NVGRE item is used to describe protocol.
 			 * If yes, both spec and mask should be NULL.
 			 * If no, both spec and mask shouldn't be NULL.
@@ -3777,10 +3770,8 @@ i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_MPLS:
-			mpls_spec =
-				(const struct rte_flow_item_mpls *)item->spec;
-			mpls_mask =
-				(const struct rte_flow_item_mpls *)item->mask;
+			mpls_spec = item->spec;
+			mpls_mask = item->mask;
 			if (!mpls_spec || !mpls_mask) {
 				rte_flow_error_set(error, EINVAL,
@@ -3916,10 +3907,8 @@ i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
 			break;
 		case RTE_FLOW_ITEM_TYPE_GTPC:
 		case RTE_FLOW_ITEM_TYPE_GTPU:
-			gtp_spec =
-				(const struct rte_flow_item_gtp *)item->spec;
-			gtp_mask =
-				(const struct rte_flow_item_gtp *)item->mask;
+			gtp_spec = item->spec;
+			gtp_mask = item->mask;
 			if (!gtp_spec || !gtp_mask) {
 				rte_flow_error_set(error, EINVAL,
@@ -4030,10 +4019,8 @@ i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
 			}
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
 			if (!(vlan_spec && vlan_mask)) {
 				rte_flow_error_set(error, EINVAL,
@@ -4157,10 +4144,8 @@ i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
 			*action_flag = 1;
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
-			vlan_spec =
-				(const struct rte_flow_item_vlan *)item->spec;
-			vlan_mask =
-				(const struct rte_flow_item_vlan *)item->mask;
+			vlan_spec = item->spec;
+			vlan_mask = item->mask;
 			if (vlan_spec && vlan_mask) {
 				if (vlan_mask->tci ==
 				    rte_cpu_to_be_16(I40E_TCI_MASK)) {
@@ -4188,7 +4173,7 @@ static int
 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 			const struct rte_flow_action *actions,
 			struct rte_flow_error *error,
-			uint8_t *action_flag,
+			uint8_t action_flag,
 			struct i40e_queue_regions *conf_info,
 			union i40e_filter_t *filter)
 {
@@ -4201,9 +4186,10 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 	struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
 	uint16_t i, j, n, tmp;
 	uint32_t index = 0;
+	uint64_t hf_bit = 1;

 	NEXT_ITEM_OF_ACTION(act, actions, index);
-	rss = (const struct rte_flow_action_rss *)act->conf;
+	rss = act->conf;

 	/**
 	 * rss only supports forwarding,
@@ -4219,7 +4205,7 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 	if (action_flag) {
 		for (n = 0; n < 64; n++) {
-			if (rss->rss_conf->rss_hf & (1 << n)) {
+			if (rss->rss_conf->rss_hf & (hf_bit << n)) {
 				conf_info->region[0].hw_flowtype[0] = n;
 				conf_info->region[0].flowtype_num = 1;
 				conf_info->queue_region_number = 1;
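The `hf_bit` temporary above fixes a classic shift-width bug: `rss_hf` is a `uint64_t` mask, but the literal `1` is a 32-bit `int`, so `1 << n` is undefined once `n` reaches the width of `int`, and the upper flow types could never match. Shifting a 64-bit operand covers all 64 bits. A standalone illustration (made-up bit position):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rss_hf = UINT64_C(1) << 40;	/* flow type above bit 31 */
	uint64_t hf_bit = 1;			/* 64-bit shift operand, as in the fix */
	int n = 40;

	/* (1 << n) is undefined behavior here: 1 is a 32-bit int.
	 * (hf_bit << n) is a well-defined 64-bit shift. */
	if (rss_hf & (hf_bit << n))
		printf("flow type %d matched\n", n);
	return 0;
}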
@@ -4313,9 +4299,11 @@ i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
 			}
 			rss_config->queue_region_conf = TRUE;
 		}
-		return 0;
 	}

+	if (rss_config->queue_region_conf)
+		return 0;
+
 	if (!rss || !rss->num) {
 		rte_flow_error_set(error, EINVAL,
 				   RTE_FLOW_ERROR_TYPE_ACTION,
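This moved `return 0` is the configuration error named in the commit title: the function used to return as soon as it fell out of the queue-region branch, so a plain RSS action (no queue regions) never reached the parsing below. Now the early return fires only when a queue-region configuration was actually taken. A reduced control-flow sketch (hypothetical names, structure only):

/* Before: returned as soon as the queue-region branch ended. */
int parse_before(int queue_region_rule, int *region_conf)
{
	if (queue_region_rule)
		*region_conf = 1;
	return 0;	/* plain-RSS parsing below was unreachable */
}

/* After: fall through; return early only if region config was set. */
int parse_after(int queue_region_rule, int *region_conf)
{
	if (queue_region_rule)
		*region_conf = 1;
	if (*region_conf)
		return 0;
	/* ... plain-RSS validation and filter fill-in run here ... */
	return 0;
}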
@@ -4378,7 +4366,7 @@ i40e_parse_rss_filter(struct rte_eth_dev *dev,
 		return ret;

 	ret = i40e_flow_parse_rss_action(dev, actions, error,
-					 &action_flag, &info, filter);
+					 action_flag, &info, filter);
 	if (ret)
 		return ret;
@@ -4397,14 +4385,15 @@ i40e_config_rss_filter_set(struct rte_eth_dev *dev,
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;

 	if (conf->queue_region_conf) {
-		i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
+		ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
 		conf->queue_region_conf = 0;
 	} else {
-		i40e_config_rss_filter(pf, conf, 1);
+		ret = i40e_config_rss_filter(pf, conf, 1);
 	}
-	return 0;
+	return ret;
 }

 static int
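`i40e_config_rss_filter_set()` previously threw away the status of both helpers and returned a hard-coded 0, so a failed flush or failed filter programming looked like success to the caller. The fix threads `ret` through. The same shape in miniature (hypothetical helpers standing in for the two i40e calls):

static int flush_queue_region(void) { return -1; }	/* simulate failure */
static int config_rss(void) { return 0; }

int rss_filter_set(int queue_region_conf)
{
	int ret;

	if (queue_region_conf)
		ret = flush_queue_region();
	else
		ret = config_rss();

	return ret;	/* was: return 0; which masked failures */
}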
@@ -4557,6 +4546,8 @@ i40e_flow_create(struct rte_eth_dev *dev,
 	case RTE_ETH_FILTER_HASH:
 		ret = i40e_config_rss_filter_set(dev,
 			    &cons_filter.rss_conf);
+		if (ret)
+			goto free_flow;
 		flow->rule = &pf->rss_info;
 		break;
 	default:
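With the set function now able to fail, `i40e_flow_create()` has to unwind instead of registering a broken flow; the added `goto free_flow` targets the function's existing cleanup label (outside this hunk). The idiom, reduced to a sketch with placeholder types:

#include <stdlib.h>

struct flow { void *rule; };

/* set_filter() returns 0 on success, negative on failure. */
struct flow *flow_create(int (*set_filter)(void))
{
	struct flow *flow = malloc(sizeof(*flow));	/* driver: rte_zmalloc */

	if (!flow)
		return NULL;
	if (set_filter())
		goto free_flow;

	flow->rule = flow;	/* placeholder for &pf->rss_info */
	return flow;

free_flow:
	free(flow);		/* driver: rte_free plus error reporting */
	return NULL;
}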