#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_common.h>
+#include <rte_cycles.h>
#include "mlx5.h"
#include "mlx5_flow.h"
struct tcf_neigh_rule {
LIST_ENTRY(tcf_neigh_rule) next;
uint32_t refcnt;
- struct ether_addr eth;
+ struct rte_ether_addr eth;
uint16_t mask;
union {
struct {
uint8_t ip_tos;
uint8_t ip_ttl_hop;
struct {
- struct ether_addr dst;
- struct ether_addr src;
+ struct rte_ether_addr dst;
+ struct rte_ether_addr src;
} eth;
union {
struct {
},
.ipv6.hdr = {
.proto = 0xff,
- .vtc_flow = RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT),
+ .vtc_flow = RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT),
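/*
 * vtc_flow packs the IP version (bits 31:28), traffic class (27:20)
 * and flow label (19:0) into one big-endian word, so 0xff shifted by
 * the TC shift masks exactly the traffic class byte.
 */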
.hop_limits = 0xff,
.src_addr =
"\xff\xff\xff\xff\xff\xff\xff\xff"
{
int idx = p_parser->sel.nkeys;
uint32_t off = actions->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
- offsetof(struct ether_hdr, s_addr) :
- offsetof(struct ether_hdr, d_addr);
+ offsetof(struct rte_ether_hdr, s_addr) :
+ offsetof(struct rte_ether_hdr, d_addr);
const struct rte_flow_action_set_mac *conf =
(const struct rte_flow_action_set_mac *)actions->conf;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
memcpy(&p_parser->keys[idx].val,
conf->mac_addr + SZ_PEDIT_KEY_VAL,
- ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
+ RTE_ETHER_ADDR_LEN - SZ_PEDIT_KEY_VAL);
p_parser->sel.nkeys = (++idx);
}
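/*
 * A 6-byte MAC address spans two 4-byte pedit keys: the first key
 * (elided above) carries the leading SZ_PEDIT_KEY_VAL bytes and the
 * memcpy above places the remaining RTE_ETHER_ADDR_LEN -
 * SZ_PEDIT_KEY_VAL bytes into the second key, which is why
 * NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN) evaluates to 2.
 */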
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4;
p_parser->keys[idx].off =
- offsetof(struct ipv4_hdr, time_to_live);
+ offsetof(struct rte_ipv4_hdr, time_to_live);
}
if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
p_parser->keys_ex[idx].htype = TCA_PEDIT_KEY_EX_HDR_TYPE_IP6;
p_parser->keys[idx].off =
- offsetof(struct ipv6_hdr, hop_limits);
+ offsetof(struct rte_ipv6_hdr, hop_limits);
}
if (actions->type == RTE_FLOW_ACTION_TYPE_DEC_TTL) {
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_ADD;
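/*
 * The value added (in lines elided here) is presumably 0xff: with
 * 8-bit wraparound, adding 0xff decrements the TTL/hop-limit field
 * by one, which is how DEC_TTL maps onto pedit's ADD command.
 */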
int keys = NUM_OF_PEDIT_KEYS(IPV6_ADDR_LEN);
int off_base =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
- offsetof(struct ipv6_hdr, src_addr) :
- offsetof(struct ipv6_hdr, dst_addr);
+ offsetof(struct rte_ipv6_hdr, src_addr) :
+ offsetof(struct rte_ipv6_hdr, dst_addr);
const struct rte_flow_action_set_ipv6 *conf =
(const struct rte_flow_action_set_ipv6 *)actions->conf;
p_parser->keys_ex[idx].cmd = TCA_PEDIT_KEY_EX_CMD_SET;
p_parser->keys[idx].off =
actions->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
- offsetof(struct ipv4_hdr, src_addr) :
- offsetof(struct ipv4_hdr, dst_addr);
+ offsetof(struct rte_ipv4_hdr, src_addr) :
+ offsetof(struct rte_ipv4_hdr, dst_addr);
p_parser->keys[idx].mask = ~UINT32_MAX;
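/*
 * Linux pedit applies (old & mask) ^ val per 32-bit word, so the
 * all-zero mask ~UINT32_MAX (i.e. 0) makes the kernel overwrite the
 * entire address word with val.
 */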
p_parser->keys[idx].val =
((const struct rte_flow_action_set_ipv4 *)
flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
- keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_SRC;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
- keys += NUM_OF_PEDIT_KEYS(ETHER_ADDR_LEN);
+ keys += NUM_OF_PEDIT_KEYS(RTE_ETHER_ADDR_LEN);
flags |= MLX5_FLOW_ACTION_SET_MAC_DST;
break;
default:
" vxlan encapsulation");
}
msk6 = (rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
- IPV6_HDR_TC_SHIFT) & 0xff;
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
if (msk6 && msk6 != 0xff)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
*/
if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
(action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
- ((struct priv *)port_id_dev->data->dev_private)->representor)
+ ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"vlan push can only be applied"
case RTE_FLOW_ITEM_TYPE_PORT_ID:
break;
case RTE_FLOW_ITEM_TYPE_ETH:
- size += SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) * 4;
+ size += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;
/* dst/src MAC addr and mask. */
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
if (ipv6 && ipv6->hdr.hop_limits)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
- (0xfful << IPV6_HDR_TC_SHIFT)))
+ (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
if (ipv6 && ipv6->hdr.hop_limits)
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
- (0xfful << IPV6_HDR_TC_SHIFT)))
+ (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
break;
}
* VXLAN VNI in 24-bit wire format.
*
* @return
- * VXLAN VNI as a 32-bit integer value in network endian.
+ * VXLAN VNI as a 32-bit integer value in network endianness.
*/
static inline rte_be32_t
vxlan_vni_as_be32(const uint8_t vni[3])
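/*
 * The body is elided in this excerpt; a minimal sketch, assuming the
 * usual packing, would place the three VNI bytes into the low-order
 * bytes of a big-endian word:
 *
 *   union { uint8_t vni[4]; rte_be32_t dword; } ret =
 *       { .vni = { 0, vni[0], vni[1], vni[2] } };
 *   return ret.dword;
 */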
FLOW_TCF_ENCAP_IPV6_DST;
if (mask) {
if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
- IPV6_HDR_TC_SHIFT) & 0xff) {
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff) {
encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
encap->ip_tos = (rte_be_to_cpu_32
(spec->hdr.vtc_flow) >>
- IPV6_HDR_TC_SHIFT) & 0xff;
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
}
if (mask->hdr.hop_limits) {
encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
" parameter is ignored");
break;
}
- if (!is_zero_ether_addr(&mask.eth->dst)) {
+ if (!rte_is_zero_ether_addr(&mask.eth->dst)) {
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
spec.eth->dst.addr_bytes);
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
mask.eth->dst.addr_bytes);
}
- if (!is_zero_ether_addr(&mask.eth->src)) {
+ if (!rte_is_zero_ether_addr(&mask.eth->src)) {
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
spec.eth->src.addr_bytes);
mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
- ETHER_ADDR_LEN,
+ RTE_ETHER_ADDR_LEN,
mask.eth->src.addr_bytes);
}
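/*
 * Each flower match is emitted as a value/mask attribute pair
 * (TCA_FLOWER_KEY_ETH_{DST,SRC} plus the matching *_MASK); addresses
 * whose mask is all-zero are omitted entirely to keep the netlink
 * message compact.
 */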
assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
mask.ipv6->hdr.hop_limits);
}
msk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>
- IPV6_HDR_TC_SHIFT) & 0xff;
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
if (msk6) {
tos6 = (rte_be_to_cpu_32
(spec.ipv6->hdr.vtc_flow) >>
- IPV6_HDR_TC_SHIFT) & 0xff;
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
mnl_attr_put_u8
(nlh, tunnel_outer ?
TCA_FLOWER_KEY_ENC_IP_TOS :
nlh->nlmsg_flags |= NLM_F_ACK;
ret = mnl_socket_sendto(tcf->nl, nlh, nlh->nlmsg_len);
if (ret <= 0) {
- /* Message send error occurres. */
+ /* Message send error occurred. */
rte_errno = errno;
return -rte_errno;
}
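/*
 * mnl_socket_sendto() returns the number of bytes sent or -1 on
 * failure; anything but a positive byte count is treated as a send
 * error and reported through rte_errno.
 */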
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
* @param[in] ifindex
- * Network inferface index to perform cleanup.
+ * Network interface index to perform cleanup.
*/
static void
flow_tcf_encap_local_cleanup(struct mlx5_flow_tcf_context *tcf,
}
/**
- * Collect neigh permament rules on specified network device.
+ * Collect permanent neigh rules on the specified network device.
* This is callback routine called by libmnl mnl_cb_run() in loop for
* every message in received packet.
*
}
if (!na_mac || !na_ip)
return 1;
- /* Neigh rule with permenent attribute found. */
+ /* Neigh rule with permanent attribute found. */
size = MNL_ALIGN(sizeof(struct nlmsghdr)) +
MNL_ALIGN(sizeof(struct ndmsg)) +
- SZ_NLATTR_DATA_OF(ETHER_ADDR_LEN) +
+ SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) +
(family == AF_INET6 ? SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN)
: SZ_NLATTR_TYPE_OF(uint32_t));
cmd = flow_tcf_alloc_nlcmd(ctx, size);
mnl_attr_put(cmd, NDA_DST, IPV6_ADDR_LEN,
mnl_attr_get_payload(na_ip));
}
- mnl_attr_put(cmd, NDA_LLADDR, ETHER_ADDR_LEN,
+ mnl_attr_put(cmd, NDA_LLADDR, RTE_ETHER_ADDR_LEN,
mnl_attr_get_payload(na_mac));
assert(size == cmd->nlmsg_len);
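/*
 * Returning 1 (MNL_CB_OK) tells mnl_cb_run() to keep iterating over
 * the remaining netlink messages in the received packet.
 */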
return 1;
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
* @param[in] ifindex
- * Network inferface index to perform cleanup.
+ * Network interface index to perform cleanup.
*/
static void
flow_tcf_encap_neigh_cleanup(struct mlx5_flow_tcf_context *tcf,
* Note that an implicit route is maintained by the kernel due to the
* presence of a peer address (IFA_ADDRESS).
*
- * These rules are used for encapsultion only and allow to assign
+ * These rules are used for encapsulation only and allow assigning
* the outer tunnel source IP address.
*
* @param[in] tcf
/**
* Releases VXLAN encap rules container by pointer. Decrements the
- * reference cointer and deletes the container if counter is zero.
+ * reference counter and deletes the container if the counter drops to zero.
*
* @param[in] irule
* VXLAN rule container pointer to release.
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
* @param[in] vtep
- * Object represinting the network device to delete. Memory
+ * Object representing the network device to delete. Memory
* allocated for this object is freed by routine.
*/
static void
}
/**
- * Aqcuire target interface index for VXLAN tunneling encapsulation.
+ * Acquire target interface index for VXLAN tunneling encapsulation.
*
* @param[in] tcf
* Context object initialized by mlx5_flow_tcf_context_create().
static void
flow_tcf_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
/**
* Fetch the applied rule handle. This is callback routine called by
* libmnl mnl_cb_run() in loop for every message in received packet.
- * When the NLM_F_ECHO flag i sspecified the kernel sends the created
+ * When the NLM_F_ECHO flag is specified the kernel sends the created
* rule descriptor back to the application and we can retrieve the
* actual rule handle from updated descriptor.
*
flow_tcf_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mlx5_flow *dev_flow;
struct nlmsghdr *nlh;
{
struct flow_tcf_stats_basic sb_data;
struct rte_flow_query_count *qc = data;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_tcf_context *ctx = priv->tcf_context;
struct mnl_socket *nl = ctx->nl;
struct mlx5_flow *dev_flow;