/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
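
/*
 * Helper macros to walk a pattern/action array: they advance "index" past
 * any VOID items or actions the application may have inserted, so the
 * parsers below only ever look at meaningful entries.
 */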
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = (pattern) + (index);			\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			(index)++;				\
			item = (pattern) + (index);		\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = (actions) + (index);			\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;				\
			act = (actions) + (index);		\
		}						\
	} while (0)

/**
 * Please be aware of an assumption shared by all the parsers below:
 * rte_flow_item fields are in big endian (network order), while
 * rte_flow_attr and rte_flow_action use CPU order.
 * Because the pattern describes packet contents, the pattern
 * normally uses network order.
 */
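
/*
 * For example, an application matching UDP destination port 80 must put
 * rte_cpu_to_be_16(80) in the item spec (and rte_cpu_to_be_16(0xFFFF) in the
 * mask), while the queue index carried in a QUEUE action conf stays a plain
 * CPU-order integer.
 */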

/**
 * Parse the rule to see if it is an n-tuple rule.
 * If it is, the n-tuple filter info is filled in along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* skip the optional Ethernet item */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* not supported: last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* not supported: last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only src & dst addresses and protocol are supported;
	 * all other fields should be masked out.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP, UDP or SCTP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* not supported: last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (item->spec && item->mask) {
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/**
			 * Only src & dst ports and TCP flags are supported;
			 * all other fields should be masked out.
			 */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = tcp_mask->hdr.dst_port;
			filter->src_port_mask = tcp_mask->hdr.src_port;
			if (tcp_mask->hdr.tcp_flags == 0xFF) {
				filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else if (!tcp_mask->hdr.tcp_flags) {
				filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			} else {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			filter->dst_port = tcp_spec->hdr.dst_port;
			filter->src_port = tcp_spec->hdr.src_port;
			filter->tcp_flags = tcp_spec->hdr.tcp_flags;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		if (item->spec && item->mask) {
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/**
			 * Only src & dst ports are supported;
			 * all other fields should be masked out.
			 */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = udp_mask->hdr.dst_port;
			filter->src_port_mask = udp_mask->hdr.src_port;

			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			filter->dst_port = udp_spec->hdr.dst_port;
			filter->src_port = udp_spec->hdr.src_port;
		}
	} else { /* RTE_FLOW_ITEM_TYPE_SCTP */
		if (item->spec && item->mask) {
			sctp_mask = (const struct rte_flow_item_sctp *)
					item->mask;

			/**
			 * Only src & dst ports are supported;
			 * all other fields should be masked out.
			 */
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(filter, 0,
					sizeof(struct rte_eth_ntuple_filter));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by ntuple filter");
				return -rte_errno;
			}

			filter->dst_port_mask = sctp_mask->hdr.dst_port;
			filter->src_port_mask = sctp_mask->hdr.src_port;

			sctp_spec = (const struct rte_flow_item_sctp *)
					item->spec;
			filter->dst_port = sctp_spec->hdr.dst_port;
			filter->src_port = sctp_spec->hdr.src_port;
		}
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* egress is not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;

	return 0;
}
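
/*
 * Illustrative sketch (not used by the driver): one pattern/action set that
 * cons_parse_ntuple_filter() accepts, matching the example in the comment
 * above and steering the matched UDP flow to Rx queue 1. The helpers
 * rte_cpu_to_be_16/32() and IPv4() come from rte_byteorder.h and rte_ip.h;
 * the addresses, ports, next_proto_id 17 (UDP) and the queue index 1 are
 * arbitrary example values.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = 17,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(0xFFFF),
 *			.dst_port = rte_cpu_to_be_16(0xFFFF),
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */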

/* a function specific to igb because the filter flags are device specific */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret;

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* igb doesn't support many priorities */
	if (filter->priority > E1000_2TUPLE_MAX_PRI) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (hw->mac.type == e1000_82576) {
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		/* 82576 can match the full 5-tuple in hardware */
		filter->flags |= RTE_5TUPLE_FLAGS;
	} else {
		/* other MAC types only offer 2-tuple (protocol + dst port) */
		if (filter->src_ip_mask || filter->dst_ip_mask ||
		    filter->src_port_mask) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "only two tuple are "
				"supported by this filter");
			return -rte_errno;
		}
		if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue number not "
				"supported by ntuple filter");
			return -rte_errno;
		}
		filter->flags |= RTE_2TUPLE_FLAGS;
	}

	return 0;
}

/**
 * Check if the flow rule is supported by igb.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = igb_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);

	return ret;
}
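
/*
 * A minimal sketch of how an application reaches this validation path through
 * the generic rte_flow API (assuming "port_id" is an initialized igb port and
 * attr/pattern/actions are built as in the example sketch above):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */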

const struct rte_flow_ops igb_flow_ops = {