net: add rte prefix to IP defines
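The diff below applies the tree-wide DPDK rename that adds an rte_ prefix to the Ethernet/IP header types, macros and helpers from rte_ether.h and rte_ip.h (this blob range also picks up the mlx5 rename of struct priv to struct mlx5_priv and a few comment typo fixes). A minimal sketch of caller code after the rename; the two helper functions and their names are hypothetical and only the rte_-prefixed identifiers are taken from the hunks that follow:

#include <stddef.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Hypothetical helpers, only to illustrate the renamed identifiers:
 * struct ether_addr  -> struct rte_ether_addr
 * is_zero_ether_addr -> rte_is_zero_ether_addr
 * struct ipv4_hdr    -> struct rte_ipv4_hdr
 */
static inline int
mac_key_is_wildcard(const struct rte_ether_addr *mask)
{
	/* An all-zero MAC mask means the key does not narrow the match. */
	return rte_is_zero_ether_addr(mask);
}

static inline size_t
ipv4_ttl_offset(void)
{
	/* offsetof() on the renamed header type, as in the pedit hunks. */
	return offsetof(struct rte_ipv4_hdr, time_to_live);
}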
diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
index e132a34..2270ae3 100644
--- a/drivers/net/mlx5/mlx5_flow_tcf.c
+++ b/drivers/net/mlx5/mlx5_flow_tcf.c
@@ -28,6 +28,7 @@
 #include <rte_flow.h>
 #include <rte_malloc.h>
 #include <rte_common.h>
+#include <rte_cycles.h>
 
 #include "mlx5.h"
 #include "mlx5_flow.h"
@@ -401,7 +402,7 @@ struct mlx5_flow_tcf_context {
 struct tcf_neigh_rule {
 	LIST_ENTRY(tcf_neigh_rule) next;
 	uint32_t refcnt;
-	struct ether_addr eth;
+	struct rte_ether_addr eth;
 	uint16_t mask;
 	union {
 		struct {
@@ -474,8 +475,8 @@ struct flow_tcf_vxlan_encap {
 	uint8_t ip_tos;
 	uint8_t ip_ttl_hop;
 	struct {
-		struct ether_addr dst;
-		struct ether_addr src;
+		struct rte_ether_addr dst;
+		struct rte_ether_addr src;
 	} eth;
 	union {
 		struct {
@@ -551,7 +552,7 @@ static const struct {
 	},
 	.ipv6.hdr = {
 		.proto = 0xff,
-		.vtc_flow = RTE_BE32(0xfful << IPV6_HDR_FL_SHIFT),
+		.vtc_flow = RTE_BE32(0xfful << RTE_IPV6_HDR_FL_SHIFT),
 		.hop_limits = 0xff,
 		.src_addr =
 			"\xff\xff\xff\xff\xff\xff\xff\xff"
@@ -688,8 +689,8 @@ flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,
 {
 	int idx = p_parser->sel.nkeys;
 	uint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
-			offsetof(struct ether_hdr, s_addr) :
-			offsetof(struct ether_hdr, d_addr);
+			offsetof(struct rte_ether_hdr, s_addr) :
+			offsetof(struct rte_ether_hdr, d_addr);
 	const struct rte_flow_action_set_mac *conf =
 		(const struct rte_flow_action_set_mac *)actions->conf;
@@ -706,7 +707,7 @@ flow_tcf_pedit_key_set_mac(const struct rte_flow_action *actions,
 	p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
 	memcpy(&p_parser->keys[idx].val,
 	       conf->mac_addr + SZ_PEDIT_KEY_VAL,
-	       ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
+	       RTE_ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
 	p_parser->sel.nkeys = (++idx);
 }
@@ -731,12 +732,12 @@ flow_tcf_pedit_key_set_dec_ttl(const struct rte_flow_action *actions,
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
 		p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
 		p_parser->keys[idx].off =
-			offsetof(struct ipv4_hdr, time_to_live);
+			offsetof(struct rte_ipv4_hdr, time_to_live);
 	}
 	if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
 		p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
 		p_parser->keys[idx].off =
-			offsetof(struct ipv6_hdr, hop_limits);
+			offsetof(struct rte_ipv6_hdr, hop_limits);
 	}
 	if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
 		p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
@@ -800,8 +801,8 @@ flow_tcf_pedit_key_set_ipv6_addr(const struct rte_flow_action *actions,
 	int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
 	int off_base =
 		actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
-		offsetof(struct ipv6_hdr, src_addr) :
-		offsetof(struct ipv6_hdr, dst_addr);
+		offsetof(struct rte_ipv6_hdr, src_addr) :
+		offsetof(struct rte_ipv6_hdr, dst_addr);
 	const struct rte_flow_action_set_ipv6 *conf =
 		(const struct rte_flow_action_set_ipv6 *)actions->conf;
@@ -835,8 +836,8 @@ flow_tcf_pedit_key_set_ipv4_addr(const struct rte_flow_action *actions,
 	p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
 	p_parser->keys[idx].off =
 		actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
-		offsetof(struct ipv4_hdr, src_addr) :
-		offsetof(struct ipv4_hdr, dst_addr);
+		offsetof(struct rte_ipv4_hdr, src_addr) :
+		offsetof(struct rte_ipv4_hdr, dst_addr);
 	p_parser->keys[idx].mask = ~UINT32_MAX;
 	p_parser->keys[idx].val =
 		((const struct rte_flow_action_set_ipv4 *)
@@ -983,11 +984,11 @@ flow_tcf_get_pedit_actions_size(const struct rte_flow_action **actions,
 			flags |= MLX5_FLOW_ACTION_DEC_TTL;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
-			keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+			keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
 			flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
 			break;
 		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
-			keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+			keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
 			flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
 			break;
 		default:
@@ -1425,7 +1426,7 @@ flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,
 					  " vxlan encapsulation");
 	}
 	msk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
-		IPV6_HDR_TC_SHIFT) & 0xff;
+		RTE_IPV6_HDR_TC_SHIFT) & 0xff;
 	if (msk6 && msk6 != 0xff)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
@@ -2425,7 +2426,7 @@ flow_tcf_validate(struct rte_eth_dev *dev,
 	 */
 	if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
 	    (action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
-	    ((struct priv *)port_id_dev->data->dev_private)->representor)
+	    ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
 					  "vlan push can only be applied"
@@ -2520,7 +2521,7 @@ flow_tcf_get_items_size(const struct rte_flow_attr *attr,
 		case RTE_FLOW_ITEM_TYPE_PORT_ID:
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			size += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
+			size += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;
 				/* dst/src MAC addr and mask. */
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
@@ -2550,7 +2551,7 @@ flow_tcf_get_items_size(const struct rte_flow_attr *attr,
 			if (ipv6 && ipv6->hdr.hop_limits)
 				size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
 			if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
-				     (0xfful << IPV6_HDR_TC_SHIFT)))
+				     (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
 				size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
 			break;
 		}
@@ -2638,7 +2639,7 @@ flow_tcf_vxlan_encap_size(const struct rte_flow_action *action)
 			if (ipv6 && ipv6->hdr.hop_limits)
 				size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
 			if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
-				     (0xfful << IPV6_HDR_TC_SHIFT)))
+				     (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
 				size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
 			break;
 		}
@@ -2915,7 +2916,7 @@ flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,
  *   VXLAN VNI in 24-bit wire format.
  *
  * @return
- *   VXLAN VNI as a 32-bit integer value in network endian.
+ *   VXLAN VNI as a 32-bit integer value in network endianness.
  */
 static inline rte_be32_t
 vxlan_vni_as_be32(const uint8_t vni[3])
@@ -3036,11 +3037,11 @@ flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,
 				FLOW_TCF_ENCAP_IPV6_DST;
 	if (mask) {
 		if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
-		    IPV6_HDR_TC_SHIFT) & 0xff) {
+		    RTE_IPV6_HDR_TC_SHIFT) & 0xff) {
 			encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
 			encap->ip_tos = (rte_be_to_cpu_32
 					(spec->hdr.vtc_flow) >>
-					 IPV6_HDR_TC_SHIFT) & 0xff;
+					 RTE_IPV6_HDR_TC_SHIFT) & 0xff;
 		}
 		if (mask->hdr.hop_limits) {
 			encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
@@ -3333,20 +3334,20 @@ flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
 						 " parameter is ignored");
 				break;
 			}
-		if (!is_zero_ether_addr(&mask.eth->dst)) {
+		if (!rte_is_zero_ether_addr(&mask.eth->dst)) {
 			mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
-				     ETHER_ADDR_LEN,
+				     RTE_ETHER_ADDR_LEN,
 				     spec.eth->dst.addr_bytes);
 			mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
-				     ETHER_ADDR_LEN,
+				     RTE_ETHER_ADDR_LEN,
 				     mask.eth->dst.addr_bytes);
 		}
-		if (!is_zero_ether_addr(&mask.eth->src)) {
+		if (!rte_is_zero_ether_addr(&mask.eth->src)) {
 			mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
-				     ETHER_ADDR_LEN,
+				     RTE_ETHER_ADDR_LEN,
 				     spec.eth->src.addr_bytes);
 			mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
-				     ETHER_ADDR_LEN,
+				     RTE_ETHER_ADDR_LEN,
 				     mask.eth->src.addr_bytes);
 		}
 		assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
@@ -3589,11 +3590,11 @@ flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
 					mask.ipv6->hdr.hop_limits);
 			}
 			msk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>
-				IPV6_HDR_TC_SHIFT) & 0xff;
+				RTE_IPV6_HDR_TC_SHIFT) & 0xff;
 			if (msk6) {
 				tos6 = (rte_be_to_cpu_32
 					(spec.ipv6->hdr.vtc_flow) >>
-					IPV6_HDR_TC_SHIFT) & 0xff;
+					RTE_IPV6_HDR_TC_SHIFT) & 0xff;
 				mnl_attr_put_u8
 					(nlh, tunnel_outer ?
 					 TCA_FLOWER_KEY_ENC_IP_TOS :
@@ -4050,7 +4051,7 @@ flow_tcf_nl_ack(struct mlx5_flow_tcf_context *tcf,
 	nlh->nlmsg_flags |= NLM_F_ACK;
 	ret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);
 	if (ret <= 0) {
-		/* Message send error occurres. */
+		/* Message send error occurred. */
 		rte_errno = errno;
 		return -rte_errno;
 	}
@@ -4306,7 +4307,7 @@ flow_tcf_collect_local_cb(const struct nlmsghdr *nlh, void *arg)
  * @param[in] tcf
  *   Context object initialized by mlx5_flow_tcf_context_create().
  * @param[in] ifindex
- *   Network inferface index to perform cleanup.
+ *   Network interface index to perform cleanup.
  */
 static void
 flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,
@@ -4342,7 +4343,7 @@ flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,
 }
 
 /**
- * Collect neigh permament rules on specified network device.
+ * Collect neigh permanent rules on specified network device.
  * This is callback routine called by libmnl mnl_cb_run() in loop for
  * every message in received packet.
 *
@@ -4391,10 +4392,10 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
 	}
 	if (!na_mac || !na_ip)
 		return 1;
-	/* Neigh rule with permenent attribute found. */
+	/* Neigh rule with permanent attribute found. */
 	size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
 	       MNL_ALIGN(sizeof(struct ndmsg)) +
-	       SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
+	       SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) +
 	       (family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
 				   : SZ_NLATTR_TYPE_OF(uint32_t));
 	cmd = flow_tcf_alloc_nlcmd(ctx, size);
@@ -4418,7 +4419,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
 		mnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,
 			     mnl_attr_get_payload(na_ip));
 	}
-	mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
+	mnl_attr_put(cmd, NDA_LLADDR, RTE_ETHER_ADDR_LEN,
 		     mnl_attr_get_payload(na_mac));
 	assert(size == cmd->nlmsg_len);
 	return 1;
@@ -4430,7 +4431,7 @@ flow_tcf_collect_neigh_cb(const struct nlmsghdr *nlh, void *arg)
  * @param[in] tcf
  *   Context object initialized by mlx5_flow_tcf_context_create().
  * @param[in] ifindex
- *   Network inferface index to perform cleanup.
+ *   Network interface index to perform cleanup.
  */
 static void
 flow_tcf_encap_neigh_cleanup(struct mlx5_flow_tcf_context *tcf,
@@ -4598,7 +4599,7 @@ flow_tcf_encap_iface_cleanup(struct mlx5_flow_tcf_context *tcf,
  * Note that an implicit route is maintained by the kernel due to the
  * presence of a peer address (IFA_ADDRESS).
  *
- * These rules are used for encapsultion only and allow to assign
+ * These rules are used for encapsulation only and allow to assign
  * the outer tunnel source IP address.
  *
  * @param[in] tcf
@@ -5017,7 +5018,7 @@ flow_tcf_encap_irule_acquire(struct mlx5_flow_tcf_context *tcf,
 /**
  * Releases VXLAN encap rules container by pointer. Decrements the
- * reference cointer and deletes the container if counter is zero.
+ * reference counter and deletes the container if counter is zero.
  *
  * @param[in] irule
  *   VXLAN rule container pointer to release.
@@ -5041,7 +5042,7 @@ flow_tcf_encap_irule_release(struct tcf_irule *iface)
  * @param[in] tcf
  *   Context object initialized by mlx5_flow_tcf_context_create().
  * @param[in] vtep
- *   Object represinting the network device to delete. Memory
+ *   Object representing the network device to delete. Memory
  *   allocated for this object is freed by routine.
  */
 static void
@@ -5267,7 +5268,7 @@ flow_tcf_decap_vtep_acquire(struct mlx5_flow_tcf_context *tcf,
 }
 
 /**
- * Aqcuire target interface index for VXLAN tunneling encapsulation.
+ * Acquire target interface index for VXLAN tunneling encapsulation.
  *
  * @param[in] tcf
  *   Context object initialized by mlx5_flow_tcf_context_create().
@@ -5565,7 +5566,7 @@ flow_tcf_check_inhw(struct mlx5_flow_tcf_context *tcf,
 static void
 flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
 	struct mlx5_flow *dev_flow;
 	struct nlmsghdr *nlh;
@@ -5600,7 +5601,7 @@ flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
 /**
  * Fetch the applied rule handle. This is callback routine called by
  * libmnl mnl_cb_run() in loop for every message in received packet.
- * When the NLM_F_ECHO flag is  specified the kernel sends the created
+ * When the NLM_F_ECHO flag is specified the kernel sends the created
  * rule descriptor back to the application and we can retrieve the
  * actual rule handle from updated descriptor.
  *
@@ -5654,7 +5655,7 @@ static int
 flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
 	       struct rte_flow_error *error)
 {
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
 	struct mlx5_flow *dev_flow;
 	struct nlmsghdr *nlh;
@@ -6138,7 +6139,7 @@ flow_tcf_query_count(struct rte_eth_dev *dev,
 {
 	struct flow_tcf_stats_basic sb_data;
 	struct rte_flow_query_count *qc = data;
-	struct priv *priv = dev->data->dev_private;
+	struct mlx5_priv *priv = dev->data->dev_private;
 	struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
 	struct mnl_socket *nl = ctx->nl;
 	struct mlx5_flow *dev_flow;