/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.flush = ixgbe_flow_flush,
};
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7

/* Advance @item to the next non-VOID item of @pattern, starting at @index. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

/* Advance @act to the next non-VOID action of @actions, starting at @index. */
#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally be in network order.
 */
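
/*
 * A minimal sketch of what that convention means for a caller. This
 * helper is illustrative only (it is not used by the driver), and the
 * addresses and ports are made-up example values.
 */
static void __rte_unused
ntuple_byteorder_sketch(struct rte_flow_item_ipv4 *ipv4_spec,
			struct rte_flow_item_udp *udp_spec)
{
	/* Packet-matching fields are carried in network (big-endian) order. */
	ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	ipv4_spec->hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
	ipv4_spec->hdr.next_proto_id = IPPROTO_UDP; /* single byte, no swap */
	udp_spec->hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
}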
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and extract the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (An illustrative pattern/action sketch follows this function.)
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;
	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only src & dst addresses and protocol are supported;
	 * everything else must be masked out.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only src & dst ports and tcp flags are supported;
		 * everything else must be masked out.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only src & dst ports are supported;
		 * everything else must be masked out.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only ingress is supported.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Invalid priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
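
/*
 * The illustrative sketch referenced in the comment above this parser:
 * a pattern/action layout it accepts (ETH with empty spec/mask, then
 * IPV4, UDP, END, forwarding to queue 1). It is not used by the driver;
 * the spec/mask arguments are assumed to be filled in network order as
 * in ntuple_byteorder_sketch().
 */
static int __rte_unused
ntuple_rule_sketch(const struct rte_flow_item_ipv4 *ipv4_spec,
		   const struct rte_flow_item_ipv4 *ipv4_mask,
		   const struct rte_flow_item_udp *udp_spec,
		   const struct rte_flow_item_udp *udp_mask,
		   struct rte_eth_ntuple_filter *filter,
		   struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_item pattern[] = {
		/* An ETH item with NULL spec/mask only skips the header. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = ipv4_spec, .mask = ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = udp_spec, .mask = udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return cons_parse_ntuple_filter(&attr, pattern, actions, filter, error);
}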
/* An ixgbe-specific parse function; the filter flags are device-specific. */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* ixgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* ixgbe doesn't support many priorities */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Check if the flow rule is supported by ixgbe.
 * This only checks the format; it does not guarantee that the rule can
 * be programmed into the HW, as there may not be enough room for it.
 */
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);

	return ret;
}
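
/*
 * A usage sketch, not part of the driver: applications reach the callback
 * above through the generic rte_flow API rather than calling it directly.
 * The port_id type is uint8_t in this DPDK generation (uint16_t later).
 */
static int __rte_unused
flow_validate_sketch(uint8_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error error;

	/* Dispatches to ixgbe_flow_validate() through ixgbe_flow_ops. */
	return rte_flow_validate(port_id, attr, pattern, actions, &error);
}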
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	return 0;
}
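
/*
 * A matching usage sketch for the flush path, again illustrative only:
 * rte_flow_flush() dispatches to ixgbe_flow_flush() through
 * ixgbe_flow_ops and removes every rule on the port.
 */
static int __rte_unused
flow_flush_sketch(uint8_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_flush(port_id, &error);
}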