git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
net/ice: enable switch flow on DCF
[dpdk.git]
/
drivers
/
net
/
enic
/
enic_clsf.c
diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 65d0be3..e206123 100644
(file)
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -55,7 +55,7 @@ void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
void enic_fdir_info(struct enic *enic)
{
void enic_fdir_info(struct enic *enic)
{
-	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+	enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
if (enic->adv_filters) {
enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
if (enic->adv_filters) {
@@ -120,7 +120,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
memset(gp, 0, sizeof(*gp));
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
memset(gp, 0, sizeof(*gp));
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
-		struct udp_hdr udp_mask, udp_val;
+		struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
@@ -134,9 +134,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
-			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
+			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
-		struct tcp_hdr tcp_mask, tcp_val;
+		struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
@@ -150,9 +150,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
-			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
-		struct sctp_hdr sctp_mask, sctp_val;
+		struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
@@ -175,7 +175,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
* manually set proto_id=sctp below.
*/
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
* manually set proto_id=sctp below.
*/
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
-			       &sctp_val, sizeof(struct sctp_hdr));
+			       &sctp_val, sizeof(struct rte_sctp_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
@@ -216,7 +216,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
-		struct udp_hdr udp_mask, udp_val;
+		struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
@@ -229,9 +229,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
udp_val.dst_port = input->flow.udp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
udp_val.dst_port = input->flow.udp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
-			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
+			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
-		struct tcp_hdr tcp_mask, tcp_val;
+		struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
@@ -244,9 +244,9 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
-			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
-		struct sctp_hdr sctp_mask, sctp_val;
+		struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
@@ -264,7 +264,7 @@ copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
}
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
}
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
-			       &sctp_val, sizeof(struct sctp_hdr));
+			       &sctp_val, sizeof(struct rte_sctp_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
@@ -337,11 +337,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
struct enic_fdir_node *key;
struct filter_v2 fltr;
int32_t pos;
struct enic_fdir_node *key;
struct filter_v2 fltr;
int32_t pos;
-	u8 do_free = 0;
-	u16 old_fltr_id = 0;
-	u32 flowtype_supported;
-	u16 flex_bytes;
-	u16 queue;
+	uint8_t do_free = 0;
+	uint16_t old_fltr_id = 0;
+	uint32_t flowtype_supported;
+	uint16_t flex_bytes;
+	uint16_t queue;
struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
@@ -464,7 +464,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
void enic_clsf_destroy(struct enic *enic)
{
void enic_clsf_destroy(struct enic *enic)
{
-	u32 index;
+	uint32_t index;
struct enic_fdir_node *key;
/* delete classifier entries */
for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
struct enic_fdir_node *key;
/* delete classifier entries */
for (index = 0; index < ENICPMD_FDIR_MAX; index++) {