X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Ffailsafe%2Ffailsafe_flow.c;h=5e2b5f7c67575abf75d11132bda7537b091f67a5;hb=82c4fb852e4287a7e6ff529d333648e52f40d451;hp=4d18e8ee7359f9bf1ea645a4e68a9c09681048a6;hpb=009c327c886432018e23ecb88c25513d69a73661;p=dpdk.git

diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index 4d18e8ee73..5e2b5f7c67 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -1,10 +1,13 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright 2017 6WIND S.A.
- * Copyright 2017 Mellanox.
+ * Copyright 2017 Mellanox Technologies, Ltd
  */
 
+#include <stddef.h>
+#include <string.h>
 #include <sys/queue.h>
 
+#include <rte_errno.h>
 #include <rte_malloc.h>
 #include <rte_tailq.h>
 #include <rte_flow.h>
@@ -18,19 +21,33 @@ fs_flow_allocate(const struct rte_flow_attr *attr,
 		 const struct rte_flow_action *actions)
 {
 	struct rte_flow *flow;
-	size_t fdsz;
+	const struct rte_flow_conv_rule rule = {
+		.attr_ro = attr,
+		.pattern_ro = items,
+		.actions_ro = actions,
+	};
+	struct rte_flow_error error;
+	int ret;
 
-	fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
-	flow = rte_zmalloc(NULL,
-			   sizeof(struct rte_flow) + fdsz,
+	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
+	if (ret < 0) {
+		ERROR("Unable to process flow rule (%s): %s",
+		      error.message ? error.message : "unspecified",
+		      strerror(rte_errno));
+		return NULL;
+	}
+	flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
 			   RTE_CACHE_LINE_SIZE);
 	if (flow == NULL) {
 		ERROR("Could not allocate new flow");
 		return NULL;
 	}
-	flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
-	if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
-		ERROR("Failed to copy flow description");
+	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
+			    &error);
+	if (ret < 0) {
+		ERROR("Failed to copy flow rule (%s): %s",
+		      error.message ? error.message : "unspecified",
+		      strerror(rte_errno));
 		rte_free(flow);
 		return NULL;
 	}
@@ -55,6 +72,7 @@ fs_flow_validate(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
+	fs_lock(dev, 0);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_flow_validate on sub_device %d", i);
 		ret = rte_flow_validate(PORT_ID(sdev),
@@ -62,9 +80,11 @@ fs_flow_validate(struct rte_eth_dev *dev,
 		if ((ret = fs_err(sdev, ret))) {
 			ERROR("Operation rte_flow_validate failed for sub_device %d"
 			      " with error %d", i, ret);
+			fs_unlock(dev, 0);
 			return ret;
 		}
 	}
+	fs_unlock(dev, 0);
 	return 0;
 }
 
@@ -79,6 +99,7 @@ fs_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow;
 	uint8_t i;
 
+	fs_lock(dev, 0);
 	flow = fs_flow_allocate(attr, patterns, actions);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		flow->flows[i] = rte_flow_create(PORT_ID(sdev),
@@ -90,6 +111,7 @@ fs_flow_create(struct rte_eth_dev *dev,
 		}
 	}
 	TAILQ_INSERT_TAIL(&PRIV(dev)->flow_list, flow, next);
+	fs_unlock(dev, 0);
 	return flow;
 err:
 	FOREACH_SUBDEV(sdev, i, dev) {
@@ -98,6 +120,7 @@ err:
 				flow->flows[i], error);
 	}
 	fs_flow_release(&flow);
+	fs_unlock(dev, 0);
 	return NULL;
 }
 
@@ -115,6 +138,7 @@ fs_flow_destroy(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 	ret = 0;
+	fs_lock(dev, 0);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		int local_ret;
 
@@ -131,6 +155,7 @@ fs_flow_destroy(struct rte_eth_dev *dev,
 	}
 	TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
 	fs_flow_release(&flow);
+	fs_unlock(dev, 0);
 	return ret;
 }
 
@@ -144,12 +169,14 @@ fs_flow_flush(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
+	fs_lock(dev, 0);
 	FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
 		DEBUG("Calling rte_flow_flush on sub_device %d", i);
 		ret = rte_flow_flush(PORT_ID(sdev), error);
 		if ((ret = fs_err(sdev, ret))) {
 			ERROR("Operation rte_flow_flush failed for sub_device %d"
 			      " with error %d", i, ret);
+			fs_unlock(dev, 0);
 			return ret;
 		}
 	}
@@ -157,27 +184,32 @@ fs_flow_flush(struct rte_eth_dev *dev,
 		TAILQ_REMOVE(&PRIV(dev)->flow_list, flow, next);
 		fs_flow_release(&flow);
 	}
+	fs_unlock(dev, 0);
 	return 0;
 }
 
 static int
 fs_flow_query(struct rte_eth_dev *dev,
 	      struct rte_flow *flow,
-	      enum rte_flow_action_type type,
+	      const struct rte_flow_action *action,
 	      void *arg,
 	      struct rte_flow_error *error)
 {
 	struct sub_device *sdev;
 
+	fs_lock(dev, 0);
 	sdev = TX_SUBDEV(dev);
 	if (sdev != NULL) {
 		int ret = rte_flow_query(PORT_ID(sdev),
 				flow->flows[SUB_ID(sdev)],
-				type, arg, error);
+				action, arg, error);
 
-		if ((ret = fs_err(sdev, ret)))
+		if ((ret = fs_err(sdev, ret))) {
+			fs_unlock(dev, 0);
 			return ret;
+		}
 	}
+	fs_unlock(dev, 0);
 	WARN("No active sub_device to query about its flow");
 	return -1;
 }
@@ -191,6 +223,7 @@ fs_flow_isolate(struct rte_eth_dev *dev,
 	uint8_t i;
 	int ret;
 
+	fs_lock(dev, 0);
 	FOREACH_SUBDEV(sdev, i, dev) {
 		if (sdev->state < DEV_PROBED)
 			continue;
@@ -202,11 +235,13 @@ fs_flow_isolate(struct rte_eth_dev *dev,
 		if ((ret = fs_err(sdev, ret))) {
 			ERROR("Operation rte_flow_isolate failed for sub_device %d"
 			      " with error %d", i, ret);
+			fs_unlock(dev, 0);
 			return ret;
 		}
 		sdev->flow_isolated = set;
 	}
 	PRIV(dev)->flow_isolated = set;
+	fs_unlock(dev, 0);
 	return 0;
 }
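Note: the fs_flow_allocate() hunk above replaces rte_flow_copy() with the
rte_flow_conv() object conversion API, used in two passes: a first call with
a NULL destination returns the required buffer size, a second call performs
the deep copy. A minimal standalone sketch of that pattern follows; it is not
part of this patch, assumes DPDK >= 18.11, and the helper name copy_rule() is
hypothetical.

#include <stddef.h>
#include <stdlib.h>

#include <rte_flow.h>

/* Deep-copy a flow rule description; returns a malloc()ed buffer or NULL. */
static struct rte_flow_conv_rule *
copy_rule(const struct rte_flow_attr *attr,
	  const struct rte_flow_item *items,
	  const struct rte_flow_action *actions)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	struct rte_flow_error error;
	struct rte_flow_conv_rule *dst;
	int ret;

	/* First pass: NULL/0 destination only reports the required size. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
	if (ret < 0)
		return NULL;
	dst = malloc(ret);
	if (dst == NULL)
		return NULL;
	/* Second pass: copy attr, pattern and actions into one flat buffer. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, ret, &rule, &error);
	if (ret < 0) {
		free(dst);
		return NULL;
	}
	return dst;
}

The same two-pass pattern explains the allocation-size change in the patch:
rte_zmalloc() now reserves offsetof(struct rte_flow, rule) + ret bytes so the
converted rule is stored inline, starting at the rule member of the failsafe
struct rte_flow, instead of behind the old flow->fd pointer.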