diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
index 5c851e9..b8ecfa8 100644
--- a/drivers/net/mlx4/mlx4_flow.c
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -9,7 +9,6 @@
  */
 
 #include <arpa/inet.h>
-#include <assert.h>
 #include <errno.h>
 #include <stdalign.h>
 #include <stddef.h>
@@ -28,8 +27,7 @@
 
 #include <rte_byteorder.h>
 #include <rte_errno.h>
-#include <rte_eth_ctrl.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_ether.h>
 #include <rte_flow.h>
 #include <rte_flow_driver.h>
@@ -225,7 +223,7 @@ mlx4_flow_merge_eth(struct rte_flow *flow,
 			goto error;
 		}
 		flow->allmulti = 1;
-	} else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
+	} else if (sum_dst != (UINT8_C(0xff) * RTE_ETHER_ADDR_LEN)) {
 		msg = "mlx4 does not support matching partial"
 			" Ethernet fields";
 		goto error;
@@ -250,14 +248,15 @@ mlx4_flow_merge_eth(struct rte_flow *flow,
 	if (!mask) {
 		eth->val.dst_mac[0] = 0xff;
 		flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
+		flow->promisc = 1;
 		return 0;
 	}
-	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
-	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+	memcpy(eth->val.dst_mac, spec->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
+	memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
 	/* Remove unwanted bits from values. */
-	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+	for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i)
 		eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
-	}
+
 	return 0;
 error:
 	return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
@@ -547,7 +546,7 @@ mlx4_flow_item_check(const struct rte_flow_item *item,
 	mask = item->mask ?
 		(const uint8_t *)item->mask :
 		(const uint8_t *)proc->mask_default;
-	assert(mask);
+	MLX4_ASSERT(mask);
 	/*
 	 * Single-pass check to make sure that:
 	 * - Mask is supported, no bits are set outside proc->mask_support.
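Note: throughout this diff, plain assert() (and the <assert.h> include removed in the first hunk) is replaced by a driver-local MLX4_ASSERT() macro. Its definition is not part of this diff; a plausible sketch, assuming it simply forwards to DPDK's rte_debug.h helpers so the checks follow the build's debug configuration rather than NDEBUG:

/* Sketch only: the real macro ships with the mlx4 driver (likely
 * mlx4_utils.h) and may differ in detail. */
#include <rte_debug.h>

#ifdef RTE_LIBRTE_MLX4_DEBUG
/* Debug build of the driver: always evaluated, panics on failure. */
#define MLX4_ASSERT(exp) RTE_VERIFY(exp)
#else
/* Normal build: only checked when the EAL enables RTE_ENABLE_ASSERT. */
#define MLX4_ASSERT(exp) RTE_ASSERT(exp)
#endif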
@@ -772,7 +771,7 @@ fill:
 		if (flow->rss)
 			break;
 		queue = action->conf;
-		if (queue->index >= priv->dev->data->nb_rx_queues) {
+		if (queue->index >= ETH_DEV(priv)->data->nb_rx_queues) {
 			msg = "queue target index beyond number of"
 				" configured Rx queues";
 			goto exit_action_not_supported;
@@ -801,7 +800,7 @@ fill:
 		/* Sanity checks. */
 		for (i = 0; i < rss->queue_num; ++i)
 			if (rss->queue[i] >=
-			    priv->dev->data->nb_rx_queues)
+			    ETH_DEV(priv)->data->nb_rx_queues)
 				break;
 		if (i != rss->queue_num) {
 			msg = "queue index target beyond number of"
@@ -954,8 +953,8 @@ mlx4_drop_get(struct mlx4_priv *priv)
 	struct mlx4_drop *drop = priv->drop;
 
 	if (drop) {
-		assert(drop->refcnt);
-		assert(drop->priv == priv);
+		MLX4_ASSERT(drop->refcnt);
+		MLX4_ASSERT(drop->priv == priv);
 		++drop->refcnt;
 		return drop;
 	}
@@ -981,12 +980,13 @@ mlx4_drop_get(struct mlx4_priv *priv)
 	priv->drop = drop;
 	return drop;
 error:
-	if (drop->qp)
-		claim_zero(mlx4_glue->destroy_qp(drop->qp));
-	if (drop->cq)
-		claim_zero(mlx4_glue->destroy_cq(drop->cq));
-	if (drop)
+	if (drop) {
+		if (drop->qp)
+			claim_zero(mlx4_glue->destroy_qp(drop->qp));
+		if (drop->cq)
+			claim_zero(mlx4_glue->destroy_cq(drop->cq));
 		rte_free(drop);
+	}
 	rte_errno = ENOMEM;
 	return NULL;
 }
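The error path above used to dereference drop->qp and drop->cq before checking drop itself, so an early allocation failure (drop == NULL) read through a NULL pointer. The rewrite only touches the members once the container is known to exist. A minimal stand-alone sketch of the same guarded-cleanup pattern (hypothetical names, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical container mirroring the drop object's shape. */
struct resource {
	void *qp;
	void *cq;
};

/* Check the container before its members so a failed allocation
 * never leads to a NULL dereference. */
static void
cleanup(struct resource *res)
{
	if (res) {
		free(res->qp);
		free(res->cq);
		free(res);
	}
}

int
main(void)
{
	cleanup(NULL);				/* safe: nothing to release */
	struct resource *res = calloc(1, sizeof(*res));
	if (res)
		res->qp = malloc(16);
	cleanup(res);				/* members first, then container */
	printf("cleanup ok\n");
	return 0;
}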
@@ -1000,7 +1000,7 @@ error:
 static void
 mlx4_drop_put(struct mlx4_drop *drop)
 {
-	assert(drop->refcnt);
+	MLX4_ASSERT(drop->refcnt);
 	if (--drop->refcnt)
 		return;
 	drop->priv->drop = NULL;
@@ -1045,7 +1045,7 @@ mlx4_flow_toggle(struct mlx4_priv *priv,
 			mlx4_rss_detach(flow->rss);
 		return 0;
 	}
-	assert(flow->ibv_attr);
+	MLX4_ASSERT(flow->ibv_attr);
 	if (!flow->internal &&
 	    !priv->isolated &&
 	    flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
@@ -1071,8 +1071,8 @@ mlx4_flow_toggle(struct mlx4_priv *priv,
 		/* Stop at the first nonexistent target queue. */
 		for (i = 0; i != rss->queues; ++i)
 			if (rss->queue_id[i] >=
-			    priv->dev->data->nb_rx_queues ||
-			    !priv->dev->data->rx_queues[rss->queue_id[i]]) {
+			    ETH_DEV(priv)->data->nb_rx_queues ||
+			    !ETH_DEV(priv)->data->rx_queues[rss->queue_id[i]]) {
 				missing = 1;
 				break;
 			}
@@ -1111,7 +1111,7 @@ mlx4_flow_toggle(struct mlx4_priv *priv,
 		}
 		qp = priv->drop->qp;
 	}
-	assert(qp);
+	MLX4_ASSERT(qp);
 	if (flow->ibv_flow)
 		return 0;
 	flow->ibv_flow = mlx4_glue->create_flow(qp, flow->ibv_attr);
@@ -1257,7 +1257,7 @@ static uint16_t
 mlx4_flow_internal_next_vlan(struct mlx4_priv *priv, uint16_t vlan)
 {
 	while (vlan < 4096) {
-		if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
+		if (ETH_DEV(priv)->data->vlan_filter_conf.ids[vlan / 64] &
 		    (UINT64_C(1) << (vlan % 64)))
 			return vlan;
 		++vlan;
@@ -1334,7 +1334,7 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 	 * get RSS by default.
 	 */
 	uint32_t queues =
-		rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
+		rte_align32pow2(ETH_DEV(priv)->data->nb_rx_queues + 1) >> 1;
 	uint16_t queue[queues];
 	struct rte_flow_action_rss action_rss = {
 		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
@@ -1354,11 +1354,11 @@ mlx4_flow_internal(struct mlx4_priv *priv, struct rte_flow_error *error)
 			.type = RTE_FLOW_ACTION_TYPE_END,
 		},
 	};
-	struct ether_addr *rule_mac = &eth_spec.dst;
+	struct rte_ether_addr *rule_mac = &eth_spec.dst;
 	rte_be16_t *rule_vlan =
-		(priv->dev->data->dev_conf.rxmode.offloads &
+		(ETH_DEV(priv)->data->dev_conf.rxmode.offloads &
 		 DEV_RX_OFFLOAD_VLAN_FILTER) &&
-		!priv->dev->data->promiscuous ?
+		!ETH_DEV(priv)->data->promiscuous ?
 		&vlan_spec.tci :
 		NULL;
 	uint16_t vlan = 0;
@@ -1391,14 +1391,14 @@ next_vlan:
 		}
 	}
 	for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
-		const struct ether_addr *mac;
+		const struct rte_ether_addr *mac;
 
 		/* Broadcasts are handled by an extra iteration. */
 		if (i < RTE_DIM(priv->mac))
 			mac = &priv->mac[i];
 		else
 			mac = &eth_mask.dst;
-		if (is_zero_ether_addr(mac))
+		if (rte_is_zero_ether_addr(mac))
 			continue;
 		/* Check if MAC flow rule is already present. */
 		for (flow = LIST_FIRST(&priv->flows);
@@ -1411,10 +1411,11 @@ next_vlan:
 
 			if (!flow->mac)
 				continue;
-			assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
-			assert(flow->ibv_attr->num_of_specs == 1);
-			assert(eth->type == IBV_FLOW_SPEC_ETH);
-			assert(flow->rss);
+			MLX4_ASSERT(flow->ibv_attr->type ==
+				    IBV_FLOW_ATTR_NORMAL);
+			MLX4_ASSERT(flow->ibv_attr->num_of_specs == 1);
+			MLX4_ASSERT(eth->type == IBV_FLOW_SPEC_ETH);
+			MLX4_ASSERT(flow->rss);
 			if (rule_vlan &&
 			    (eth->val.vlan_tag != *rule_vlan ||
 			     eth->mask.vlan_tag != RTE_BE16(0x0fff)))
@@ -1438,7 +1439,7 @@ next_vlan:
 		if (!flow || !flow->internal) {
 			/* Not found, create a new flow rule. */
 			memcpy(rule_mac, mac, sizeof(*mac));
-			flow = mlx4_flow_create(priv->dev, &attr, pattern,
+			flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
 						actions, error);
 			if (!flow) {
 				err = -rte_errno;
@@ -1454,21 +1455,22 @@ next_vlan:
 		goto next_vlan;
 	}
 	/* Take care of promiscuous and all multicast flow rules. */
-	if (priv->dev->data->promiscuous || priv->dev->data->all_multicast) {
+	if (ETH_DEV(priv)->data->promiscuous ||
+	    ETH_DEV(priv)->data->all_multicast) {
 		for (flow = LIST_FIRST(&priv->flows);
 		     flow && flow->internal;
 		     flow = LIST_NEXT(flow, next)) {
-			if (priv->dev->data->promiscuous) {
+			if (ETH_DEV(priv)->data->promiscuous) {
 				if (flow->promisc)
 					break;
 			} else {
-				assert(priv->dev->data->all_multicast);
+				MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
 				if (flow->allmulti)
 					break;
 			}
 		}
 		if (flow && flow->internal) {
-			assert(flow->rss);
+			MLX4_ASSERT(flow->rss);
 			if (flow->rss->queues != queues ||
 			    memcmp(flow->rss->queue_id, action_rss.queue,
 				   queues * sizeof(flow->rss->queue_id[0])))
@@ -1476,23 +1478,23 @@ next_vlan:
 		}
 		if (!flow || !flow->internal) {
 			/* Not found, create a new flow rule. */
-			if (priv->dev->data->promiscuous) {
+			if (ETH_DEV(priv)->data->promiscuous) {
 				pattern[1].spec = NULL;
 				pattern[1].mask = NULL;
 			} else {
-				assert(priv->dev->data->all_multicast);
+				MLX4_ASSERT(ETH_DEV(priv)->data->all_multicast);
 				pattern[1].spec = &eth_allmulti;
 				pattern[1].mask = &eth_allmulti;
 			}
 			pattern[2] = pattern[3];
-			flow = mlx4_flow_create(priv->dev, &attr, pattern,
+			flow = mlx4_flow_create(ETH_DEV(priv), &attr, pattern,
 						actions, error);
 			if (!flow) {
 				err = -rte_errno;
 				goto error;
 			}
 		}
-		assert(flow->promisc || flow->allmulti);
+		MLX4_ASSERT(flow->promisc || flow->allmulti);
 		flow->select = 1;
 	}
 error:
@@ -1502,7 +1504,8 @@ error:
 		struct rte_flow *next = LIST_NEXT(flow, next);
 
 		if (!flow->select)
-			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+			claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+						     error));
 		else
 			flow->select = 0;
 		flow = next;
@@ -1540,7 +1543,8 @@ mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
 		for (flow = LIST_FIRST(&priv->flows);
 		     flow && flow->internal;
 		     flow = LIST_FIRST(&priv->flows))
-			claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+			claim_zero(mlx4_flow_destroy(ETH_DEV(priv), flow,
+						     error));
 	} else {
 		/* Refresh internal rules. */
 		ret = mlx4_flow_internal(priv, error);
@@ -1554,7 +1558,7 @@ mlx4_flow_sync(struct mlx4_priv *priv, struct rte_flow_error *error)
 			return ret;
 	}
 	if (!priv->started)
-		assert(!priv->drop);
+		MLX4_ASSERT(!priv->drop);
 	return 0;
 }
 
@@ -1573,8 +1577,8 @@ mlx4_flow_clean(struct mlx4_priv *priv)
 	struct rte_flow *flow;
 
 	while ((flow = LIST_FIRST(&priv->flows)))
-		mlx4_flow_destroy(priv->dev, flow, NULL);
-	assert(LIST_EMPTY(&priv->rss));
+		mlx4_flow_destroy(ETH_DEV(priv), flow, NULL);
+	MLX4_ASSERT(LIST_EMPTY(&priv->rss));
 }
 
 static const struct rte_flow_ops mlx4_flow_ops = {
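Note: several hunks replace direct priv->dev accesses with an ETH_DEV(priv) accessor. The macro itself is not part of this diff; presumably it resolves the rte_eth_dev from information the private structure keeps (such as the port id in the shared device data), rather than caching a pointer into the primary process's rte_eth_devices[] array. A hedged sketch of such an accessor (field and macro layout are assumptions, not the driver's actual definitions, which would live in mlx4.h):

/* Sketch only: illustrates one way an ETH_DEV()-style accessor can be
 * built on top of DPDK's ethdev arrays. */
#include <rte_ethdev.h>

struct mlx4_priv_sketch {
	struct rte_eth_dev_data *dev_data;	/* assumed: shared ethdev data */
};

/* Look the device up by port id each time instead of storing a pointer. */
#define PORT_ID(priv)	((priv)->dev_data->port_id)
#define ETH_DEV(priv)	(&rte_eth_devices[PORT_ID(priv)])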