4 * Copyright 2016 6WIND S.A.
5 * Copyright 2016 Mellanox.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of 6WIND S.A. nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <rte_common.h>
40 #include <rte_errno.h>
41 #include <rte_branch_prediction.h>
42 #include "rte_ethdev.h"
43 #include "rte_flow_driver.h"
/**
 * Flow elements description tables.
 *
 * One entry per item/action type; indexed by the type enum value.
 */
struct rte_flow_desc_data {
	const char *name; /**< Element name; NULL marks an unsupported type. */
	size_t size; /**< Size of the spec/conf structure (0 when none). */
};
/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
61 /** Information about known flow pattern items. */
62 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
64 MK_FLOW_ITEM(VOID, 0),
65 MK_FLOW_ITEM(INVERT, 0),
66 MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
68 MK_FLOW_ITEM(VF, sizeof(struct rte_flow_item_vf)),
69 MK_FLOW_ITEM(PORT, sizeof(struct rte_flow_item_port)),
70 MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)), /* +pattern[] */
71 MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
72 MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
73 MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
74 MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
75 MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
76 MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
77 MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
78 MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
79 MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
80 MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
81 MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
82 MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
83 MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
	}
93 /** Information about known flow actions. */
94 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
95 MK_FLOW_ACTION(END, 0),
96 MK_FLOW_ACTION(VOID, 0),
97 MK_FLOW_ACTION(PASSTHRU, 0),
98 MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
99 MK_FLOW_ACTION(FLAG, 0),
100 MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
101 MK_FLOW_ACTION(DROP, 0),
102 MK_FLOW_ACTION(COUNT, 0),
103 MK_FLOW_ACTION(DUP, sizeof(struct rte_flow_action_dup)),
104 MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)), /* +queue[] */
105 MK_FLOW_ACTION(PF, 0),
106 MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
109 /* Get generic flow operations structure from a port. */
110 const struct rte_flow_ops *
111 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
113 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
114 const struct rte_flow_ops *ops;
117 if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
119 else if (unlikely(!dev->dev_ops->filter_ctrl ||
120 dev->dev_ops->filter_ctrl(dev,
121 RTE_ETH_FILTER_GENERIC,
128 rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
129 NULL, rte_strerror(code));
133 /* Check whether a flow rule can be created on a given port. */
135 rte_flow_validate(uint16_t port_id,
136 const struct rte_flow_attr *attr,
137 const struct rte_flow_item pattern[],
138 const struct rte_flow_action actions[],
139 struct rte_flow_error *error)
141 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
142 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
146 if (likely(!!ops->validate))
147 return ops->validate(dev, attr, pattern, actions, error);
148 return rte_flow_error_set(error, ENOSYS,
149 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
150 NULL, rte_strerror(ENOSYS));
153 /* Create a flow rule on a given port. */
155 rte_flow_create(uint16_t port_id,
156 const struct rte_flow_attr *attr,
157 const struct rte_flow_item pattern[],
158 const struct rte_flow_action actions[],
159 struct rte_flow_error *error)
161 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
162 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
166 if (likely(!!ops->create))
167 return ops->create(dev, attr, pattern, actions, error);
168 rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
169 NULL, rte_strerror(ENOSYS));
173 /* Destroy a flow rule on a given port. */
175 rte_flow_destroy(uint16_t port_id,
176 struct rte_flow *flow,
177 struct rte_flow_error *error)
179 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
180 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
184 if (likely(!!ops->destroy))
185 return ops->destroy(dev, flow, error);
186 return rte_flow_error_set(error, ENOSYS,
187 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
188 NULL, rte_strerror(ENOSYS));
191 /* Destroy all flow rules associated with a port. */
193 rte_flow_flush(uint16_t port_id,
194 struct rte_flow_error *error)
196 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
197 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
201 if (likely(!!ops->flush))
202 return ops->flush(dev, error);
203 return rte_flow_error_set(error, ENOSYS,
204 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
205 NULL, rte_strerror(ENOSYS));
208 /* Query an existing flow rule. */
210 rte_flow_query(uint16_t port_id,
211 struct rte_flow *flow,
212 enum rte_flow_action_type action,
214 struct rte_flow_error *error)
216 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
217 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
221 if (likely(!!ops->query))
222 return ops->query(dev, flow, action, data, error);
223 return rte_flow_error_set(error, ENOSYS,
224 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
225 NULL, rte_strerror(ENOSYS));
228 /* Restrict ingress traffic to the defined flow rules. */
230 rte_flow_isolate(uint16_t port_id,
232 struct rte_flow_error *error)
234 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
235 const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
239 if (likely(!!ops->isolate))
240 return ops->isolate(dev, set, error);
241 return rte_flow_error_set(error, ENOSYS,
242 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
243 NULL, rte_strerror(ENOSYS));
246 /* Initialize flow error structure. */
248 rte_flow_error_set(struct rte_flow_error *error,
250 enum rte_flow_error_type type,
255 *error = (struct rte_flow_error){
265 /** Compute storage space needed by item specification. */
267 flow_item_spec_size(const struct rte_flow_item *item,
268 size_t *size, size_t *pad)
274 switch (item->type) {
276 const struct rte_flow_item_raw *raw;
279 /* Not a fall-through */
280 case RTE_FLOW_ITEM_TYPE_RAW:
281 spec.raw = item->spec;
282 *size = offsetof(struct rte_flow_item_raw, pattern) +
283 spec.raw->length * sizeof(*spec.raw->pattern);
286 *size = rte_flow_desc_item[item->type].size;
290 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
293 /** Compute storage space needed by action configuration. */
295 flow_action_conf_size(const struct rte_flow_action *action,
296 size_t *size, size_t *pad)
302 switch (action->type) {
304 const struct rte_flow_action_rss *rss;
307 /* Not a fall-through. */
308 case RTE_FLOW_ACTION_TYPE_RSS:
309 conf.rss = action->conf;
310 *size = offsetof(struct rte_flow_action_rss, queue) +
311 conf.rss->num * sizeof(*conf.rss->queue);
314 *size = rte_flow_desc_action[action->type].size;
318 *pad = RTE_ALIGN_CEIL(*size, sizeof(double)) - *size;
321 /** Store a full rte_flow description. */
323 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
324 const struct rte_flow_attr *attr,
325 const struct rte_flow_item *items,
326 const struct rte_flow_action *actions)
328 struct rte_flow_desc *fd = NULL;
337 const struct rte_flow_item *item;
341 fd->items = (void *)&fd->data[off1];
343 struct rte_flow_item *dst = NULL;
345 if ((size_t)item->type >=
346 RTE_DIM(rte_flow_desc_item) ||
347 !rte_flow_desc_item[item->type].name) {
352 dst = memcpy(fd->data + off1, item,
354 off1 += sizeof(*item);
355 flow_item_spec_size(item, &tmp, &pad);
358 dst->spec = memcpy(fd->data + off2,
364 dst->last = memcpy(fd->data + off2,
370 dst->mask = memcpy(fd->data + off2,
374 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
375 } while ((item++)->type != RTE_FLOW_ITEM_TYPE_END);
376 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
379 const struct rte_flow_action *action;
383 fd->actions = (void *)&fd->data[off1];
385 struct rte_flow_action *dst = NULL;
387 if ((size_t)action->type >=
388 RTE_DIM(rte_flow_desc_action) ||
389 !rte_flow_desc_action[action->type].name) {
394 dst = memcpy(fd->data + off1, action,
396 off1 += sizeof(*action);
397 flow_action_conf_size(action, &tmp, &pad);
400 dst->conf = memcpy(fd->data + off2,
404 off2 = RTE_ALIGN_CEIL(off2, sizeof(double));
405 } while ((action++)->type != RTE_FLOW_ACTION_TYPE_END);
409 off1 = RTE_ALIGN_CEIL(off1, sizeof(double));
410 tmp = RTE_ALIGN_CEIL(offsetof(struct rte_flow_desc, data),
412 size = tmp + off1 + off2;
417 *fd = (const struct rte_flow_desc) {
421 tmp -= offsetof(struct rte_flow_desc, data);