TUNNEL,
FLEX,
QUEUE,
+ PUSH,
/* Flex arguments */
FLEX_ITEM_INIT,
QUEUE_DESTROY_ID,
QUEUE_DESTROY_POSTPONE,
+ /* Push arguments. */
+ PUSH_QUEUE,
+
/* Table arguments. */
TABLE_CREATE,
TABLE_DESTROY,
static int parse_qo_destroy(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
+static int parse_push(struct context *, const struct token *,
+ const char *, unsigned int,
+ void *, unsigned int);
static int parse_tunnel(struct context *, const struct token *,
const char *, unsigned int,
void *, unsigned int);
ISOLATE,
TUNNEL,
FLEX,
- QUEUE)),
+ QUEUE,
+ PUSH)),
.call = parse_init,
},
/* Top-level command. */
.call = parse_qo_destroy,
},
/* Top-level command. */
+ [PUSH] = {
+ .name = "push",
+ .help = "push enqueued operations",
+ .next = NEXT(NEXT_ENTRY(PUSH_QUEUE), NEXT_ENTRY(COMMON_PORT_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, port)),
+ .call = parse_push,
+ },
+ /* Sub-level commands. */
+ [PUSH_QUEUE] = {
+ .name = "queue",
+ .help = "specify queue id",
+ .next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(COMMON_QUEUE_ID)),
+ .args = ARGS(ARGS_ENTRY(struct buffer, queue)),
+ },
+ /* Top-level command. */
[INDIRECT_ACTION] = {
.name = "indirect_action",
.type = "{command} {port_id} [{arg} [...]]",
}
}
+/** Parse tokens for push queue command. */
+static int
+parse_push(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+
+ /* Token name must match. */
+ if (parse_default(ctx, token, str, len, NULL, 0) < 0)
+ return -1;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return len;
+ if (!out->command) {
+ if (ctx->curr != PUSH)
+ return -1;
+ if (sizeof(*out) > size)
+ return -1;
+ out->command = ctx->curr;
+ ctx->objdata = 0;
+ ctx->object = out;
+ ctx->objmask = NULL;
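+		/* No variable-length arguments here; point data at the buffer end. */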
+ out->args.vc.data = (uint8_t *)out + size;
+ }
+ return len;
+}
+
static int
parse_flex(struct context *ctx, const struct token *token,
const char *str, unsigned int len,
in->args.destroy.rule_n,
in->args.destroy.rule);
break;
+ case PUSH:
+ port_queue_flow_push(in->port, in->queue);
+ break;
case INDIRECT_ACTION_CREATE:
port_action_handle_create(
in->port, in->args.vc.attr.group,
return ret;
}
+/** Push all outstanding operations in the queue to the NIC. */
+int
+port_queue_flow_push(portid_t port_id, queueid_t queue_id)
+{
+ struct rte_port *port;
+ struct rte_flow_error error;
+ int ret = 0;
+
+ if (port_id_is_invalid(port_id, ENABLED_WARN) ||
+ port_id == (portid_t)RTE_PORT_ALL)
+ return -EINVAL;
+ port = &ports[port_id];
+
+ if (queue_id >= port->queue_nb) {
+ printf("Queue #%u is invalid\n", queue_id);
+ return -EINVAL;
+ }
+
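+	/* Poisoning to make sure PMDs update it in case of error. */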
+ memset(&error, 0x55, sizeof(error));
+ ret = rte_flow_push(port_id, queue_id, &error);
+	if (ret < 0) {
+		printf("Failed to push operations in the queue\n");
+		return port_flow_complain(&error);
+	}
+ printf("Queue #%u operations pushed\n", queue_id);
+ return ret;
+}
+
/** Create flow rule. */
int
port_flow_create(portid_t port_id,
const struct rte_flow_action *actions);
int port_queue_flow_destroy(portid_t port_id, queueid_t queue_id,
bool postpone, uint32_t n, const uint32_t *rule);
+int port_queue_flow_push(portid_t port_id, queueid_t queue_id);
int port_flow_validate(portid_t port_id,
const struct rte_flow_attr *attr,
const struct rte_flow_item *pattern,
flow queue {port_id} destroy {queue_id}
[postpone {boolean}] rule {rule_id} [...]
+- Push enqueued operations::
+
+ flow push {port_id} queue {queue_id}
+
- Create a flow rule::
flow create {port_id}
Caught error type [...] ([...]): [...]
+Pushing enqueued operations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``flow push`` pushes all the outstanding enqueued operations
+to the underlying device immediately.
+It is bound to ``rte_flow_push()``::
+
+ flow push {port_id} queue {queue_id}
+
+If successful, it will show::
+
+ Queue #[...] operations pushed
+
+The usual error message is shown when operations cannot be pushed::
+
+ Caught error type [...] ([...]): [...]
+
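For context (not part of the patch): a minimal sketch of how an application might pair ``rte_flow_push()`` with ``rte_flow_pull()`` once operations have been enqueued on a flow queue. The helper name ``flush_flow_queue()`` and the ``COMP_BURST`` size are illustrative only, and the port/queue are assumed to have been set up beforehand with ``rte_flow_configure()``.

#include <stdio.h>
#include <rte_flow.h>

#define COMP_BURST 32 /* arbitrary completion burst size for this sketch */

/* Illustrative helper: flush a flow queue, then drain available results. */
static int
flush_flow_queue(uint16_t port_id, uint32_t queue_id)
{
	struct rte_flow_op_result res[COMP_BURST];
	struct rte_flow_error error;
	int ret;
	int i;

	/* Submit everything enqueued on this queue to the hardware. */
	ret = rte_flow_push(port_id, queue_id, &error);
	if (ret < 0) {
		fprintf(stderr, "flow push failed: %s\n",
			error.message ? error.message : "(no stated reason)");
		return ret;
	}
	/* Retrieve whatever completions are already available. */
	ret = rte_flow_pull(port_id, queue_id, res, COMP_BURST, &error);
	if (ret < 0) {
		fprintf(stderr, "flow pull failed: %s\n",
			error.message ? error.message : "(no stated reason)");
		return ret;
	}
	for (i = 0; i < ret; i++)
		if (res[i].status != RTE_FLOW_OP_SUCCESS)
			fprintf(stderr, "operation %d failed\n", i);
	return ret;
}

The ``flow push`` command above is simply the interactive counterpart of the ``rte_flow_push()`` call in this sketch.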
Creating a tunnel stub for offload
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~