/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdint.h>
#include <errno.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>

#include "ipn3ke_rawdev_api.h"
#include "ipn3ke_flow.h"
#include "ipn3ke_logs.h"
#include "ipn3ke_ethdev.h"

/** Static initializer for items. */
#define FLOW_PATTERNS(...) \
	((const enum rte_flow_item_type []) { \
		__VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
	})
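
/* For example, FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_VLAN)
 * expands to a compound literal equivalent to:
 *
 *     (const enum rte_flow_item_type []) {
 *         RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_VLAN,
 *         RTE_FLOW_ITEM_TYPE_END,
 *     }
 *
 * i.e. an END-terminated item-type array that ipn3ke_match_pattern() walks.
 */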

enum IPN3KE_HASH_KEY_TYPE {
	IPN3KE_HASH_KEY_VXLAN,
	IPN3KE_HASH_KEY_MAC,
	IPN3KE_HASH_KEY_QINQ,
	IPN3KE_HASH_KEY_MPLS,
	IPN3KE_HASH_KEY_IP_TCP,
	IPN3KE_HASH_KEY_IP_UDP,
	IPN3KE_HASH_KEY_IP_NVGRE,
	IPN3KE_HASH_KEY_VXLAN_IP_UDP,
};

struct ipn3ke_flow_parse {
	uint32_t mark:1; /**< Set if the flow is marked. */
	uint32_t drop:1; /**< ACL drop. */
	uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS;
	uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */
	uint8_t key_len; /**< Length in bits. */
	uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)];
	/**< Parsed key data, MSB byte first. */
};

typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser);

struct ipn3ke_flow_pattern {
	const enum rte_flow_item_type *const items;

	pattern_filter_t filter;
};

/*
 * VXLAN hash key:
 *
 * typedef struct packed {
 *     logic [47:0] vxlan_inner_mac;
 *     logic [23:0] vxlan_vni;
 * } Hash_Key_Vxlan_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_VXLAN
 *     RTE_FLOW_ITEM_TYPE_ETH
 */
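
/* Key layout in parser->key[] as filled below (MSB-first):
 * key[0..5] holds the inner Ethernet source MAC, key[6..8] the 24-bit VNI.
 */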

static int
ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vxlan *vxlan = NULL;
	const struct rte_flow_item_eth *eth = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (/*!item->spec || item->mask || */item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = item->spec;
			rte_memcpy(&parser->key[0],
					eth->src.addr_bytes,
					RTE_ETHER_ADDR_LEN);
			break;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = item->spec;
			rte_memcpy(&parser->key[6], vxlan->vni, 3);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (vxlan != NULL && eth != NULL) {
		parser->key_len = 48 + 24;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * MAC hash key:
 *
 * typedef struct packed {
 *     logic [47:0] eth_smac;
 * } Hash_Key_Mac_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_ETH
 */

static int
ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_eth *eth = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = item->spec;
			rte_memcpy(parser->key,
					eth->src.addr_bytes,
					RTE_ETHER_ADDR_LEN);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (eth != NULL) {
		parser->key_len = 48;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * QinQ hash key:
 *
 * typedef struct packed {
 *     logic [11:0] outer_vlan_id;
 *     logic [11:0] inner_vlan_id;
 * } Hash_Key_QinQ_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_VLAN
 *     RTE_FLOW_ITEM_TYPE_VLAN
 */
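
/* For illustration: with outer TCI 0x123 and inner TCI 0x456, the two
 * 12-bit VLAN IDs are packed MSB-first into 24 bits by the code below,
 * giving key[0] = 0x12, key[1] = 0x34, key[2] = 0x56.
 */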

static int
ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vlan *outer_vlan = NULL;
	const struct rte_flow_item_vlan *inner_vlan = NULL;
	const struct rte_flow_item *item;
	uint16_t tci;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VLAN:
			if (!outer_vlan) {
				outer_vlan = item->spec;

				tci = rte_be_to_cpu_16(outer_vlan->tci);
				parser->key[0]  = (tci & 0xff0) >> 4;
				parser->key[1] |= (tci & 0x00f) << 4;
			} else {
				inner_vlan = item->spec;

				tci = rte_be_to_cpu_16(inner_vlan->tci);
				parser->key[1] |= (tci & 0xf00) >> 8;
				parser->key[2]  = (tci & 0x0ff);
			}
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (outer_vlan != NULL && inner_vlan != NULL) {
		parser->key_len = 12 + 12;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * MPLS hash key:
 *
 * typedef struct packed {
 *     logic [19:0] mpls_label1;
 *     logic [19:0] mpls_label2;
 * } Hash_Key_Mpls_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_MPLS
 *     RTE_FLOW_ITEM_TYPE_MPLS
 */
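
/* For illustration: with label1 = 0x12345 and label2 = 0x6789a (20 bits
 * each, taken from label_tc_s[0..2] with the TC/S bits masked off), the
 * packing below concatenates them MSB-first:
 * key[0..4] = 0x12 0x34 0x56 0x78 0x9a.
 */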

static int
ipn3ke_pattern_mpls(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_mpls *mpls1 = NULL;
	const struct rte_flow_item_mpls *mpls2 = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (!mpls1) {
				mpls1 = item->spec;

				parser->key[0] = mpls1->label_tc_s[0];
				parser->key[1] = mpls1->label_tc_s[1];
				parser->key[2] = mpls1->label_tc_s[2] & 0xf0;
			} else {
				mpls2 = item->spec;

				parser->key[2] |=
					((mpls2->label_tc_s[0] & 0xf0) >> 4);
				parser->key[3] =
					((mpls2->label_tc_s[0] & 0xf) << 4) |
					((mpls2->label_tc_s[1] & 0xf0) >> 4);
				parser->key[4] =
					((mpls2->label_tc_s[1] & 0xf) << 4) |
					((mpls2->label_tc_s[2] & 0xf0) >> 4);
			}
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (mpls1 != NULL && mpls2 != NULL) {
		parser->key_len = 20 + 20;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * IPv4 + TCP hash key:
 *
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] tcp_sport;
 * } Hash_Key_Ip_Tcp_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_IPV4
 *     RTE_FLOW_ITEM_TYPE_TCP
 */

static int
ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_tcp *tcp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;
			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp = item->spec;
			rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && tcp != NULL) {
		parser->key_len = 32 + 16;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * IPv4 + UDP hash key:
 *
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 * } Hash_Key_Ip_Udp_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_IPV4
 *     RTE_FLOW_ITEM_TYPE_UDP
 */

static int
ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;
			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;
			rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && udp != NULL) {
		parser->key_len = 32 + 16;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * IPv4 + UDP + NVGRE hash key:
 *
 * typedef struct packed {
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 *     logic [23:0] tni;
 * } Hash_Key_Ip_Nvgre_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_IPV4
 *     RTE_FLOW_ITEM_TYPE_UDP
 *     RTE_FLOW_ITEM_TYPE_NVGRE
 */

static int
ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_nvgre *nvgre = NULL;
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;
			rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;
			rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
			break;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre = item->spec;
			rte_memcpy(&parser->key[6], nvgre->tni, 3);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (ipv4 != NULL && udp != NULL && nvgre != NULL) {
		parser->key_len = 32 + 16 + 24;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

/*
 * VXLAN + IPv4 + UDP hash key:
 *
 * typedef struct packed {
 *     logic [23:0] vxlan_vni;
 *     logic [31:0] ip_sa;
 *     logic [15:0] udp_sport;
 * } Hash_Key_Vxlan_Ip_Udp_t;
 *
 * Supported flow pattern:
 *     RTE_FLOW_ITEM_TYPE_VXLAN
 *     RTE_FLOW_ITEM_TYPE_IPV4
 *     RTE_FLOW_ITEM_TYPE_UDP
 */

static int
ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_item_vxlan *vxlan = NULL;
	const struct rte_flow_item_ipv4 *ipv4 = NULL;
	const struct rte_flow_item_udp *udp = NULL;
	const struct rte_flow_item *item;

	for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (!item->spec || item->mask || item->last) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Only support item with 'spec'");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = item->spec;
			rte_memcpy(&parser->key[0], vxlan->vni, 3);
			break;

		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = item->spec;
			rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4);
			break;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = item->spec;
			rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2);
			break;

		default:
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Unsupported item type");
			return -rte_errno;
		}
	}

	if (vxlan != NULL && ipv4 != NULL && udp != NULL) {
		parser->key_len = 24 + 32 + 16;
		return 0;
	}

	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			patterns, "Missing patterns");
	return -rte_errno;
}

static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = {
	[IPN3KE_HASH_KEY_VXLAN] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
				RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_vxlan,
	},

	[IPN3KE_HASH_KEY_MAC] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
		.filter = ipn3ke_pattern_mac,
	},

	[IPN3KE_HASH_KEY_QINQ] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
				RTE_FLOW_ITEM_TYPE_VLAN),
		.filter = ipn3ke_pattern_qinq,
	},

	[IPN3KE_HASH_KEY_MPLS] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
				RTE_FLOW_ITEM_TYPE_MPLS),
		.filter = ipn3ke_pattern_mpls,
	},

	[IPN3KE_HASH_KEY_IP_TCP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_TCP),
		.filter = ipn3ke_pattern_ip_tcp,
	},

	[IPN3KE_HASH_KEY_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_ip_udp,
	},

	[IPN3KE_HASH_KEY_IP_NVGRE] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_UDP,
				RTE_FLOW_ITEM_TYPE_NVGRE),
		.filter = ipn3ke_pattern_ip_nvgre,
	},

	[IPN3KE_HASH_KEY_VXLAN_IP_UDP] = {
		.items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
				RTE_FLOW_ITEM_TYPE_IPV4,
				RTE_FLOW_ITEM_TYPE_UDP),
		.filter = ipn3ke_pattern_vxlan_ip_udp,
	},
};

static int
ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr,
	struct rte_flow_error *error)
{
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR, NULL,
				"NULL attribute.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
				"groups are not supported");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
				"egress is not supported");
		return -rte_errno;
	}

	if (attr->transfer) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
				"transfer is not supported");
		return -rte_errno;
	}

	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
				"only ingress is supported");
		return -rte_errno;
	}

	return 0;
}

static int
ipn3ke_flow_convert_actions(const struct rte_flow_action actions[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	const struct rte_flow_action_mark *mark = NULL;

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
				"NULL action.");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;

		case RTE_FLOW_ACTION_TYPE_MARK:
			if (mark) {
				rte_flow_error_set(error, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions, "duplicated mark");
				return -rte_errno;
			}

			mark = actions->conf;
			if (!mark) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"mark must be defined");
				return -rte_errno;
			} else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"mark id is out of range");
				return -rte_errno;
			}

			parser->mark = 1;
			parser->mark_id = mark->id;
			break;

		case RTE_FLOW_ACTION_TYPE_DROP:
			parser->drop = 1;
			break;

		default:
			rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions, "invalid action");
			return -rte_errno;
		}
	}

	if (!parser->drop && !parser->mark) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				actions, "no valid action");
		return -rte_errno;
	}

	return 0;
}

static int
ipn3ke_match_pattern(const enum rte_flow_item_type *patterns,
	const struct rte_flow_item *input)
{
	const struct rte_flow_item *item = input;

	while ((*patterns == item->type) &&
			(*patterns != RTE_FLOW_ITEM_TYPE_END)) {
		patterns++;
		item++;
	}

	return (*patterns == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

static pattern_filter_t
ipn3ke_find_filter_func(const struct rte_flow_item *input, uint32_t *idx)
{
	pattern_filter_t filter = NULL;
	uint32_t i;

	for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) {
		if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items,
				input)) {
			filter = ipn3ke_supported_patterns[i].filter;
			*idx = i;
			break;
		}
	}

	return filter;
}

static int
ipn3ke_flow_convert_items(const struct rte_flow_item items[],
	struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
{
	pattern_filter_t filter = NULL;
	uint32_t idx;

	if (!items) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
				"NULL pattern.");
		return -rte_errno;
	}

	filter = ipn3ke_find_filter_func(items, &idx);
	if (!filter) {
		rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				items, "Unsupported pattern");
		return -rte_errno;
	}

	parser->key_type = idx;

	return filter(items, error, parser);
}

/* Put the least significant @nbits of @data into the @dst bit stream at
 * bit @offset; within each byte, bits are filled from MSB to LSB.
 *
 *        +------+------+------+------+
 *  dst:  |      |      |      |      |
 *        +------+------+------+------+
 *        |<- offset ->|<- data: nbits ->|
 */
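
/* For illustration: copy_data_bits(dst, 0xb, 6, 4) on a zeroed two-byte
 * dst places the 4 LSBs of 0xb (1011b) at bit offset 6, so the first two
 * data bits land in the low bits of dst[0] and the last two in the high
 * bits of dst[1]: dst becomes {0x02, 0xc0}.
 */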

static void
copy_data_bits(uint8_t *dst, uint64_t data,
		uint32_t offset, uint8_t nbits)
{
	uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
	uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
	uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
	uint32_t size = offset + nbits;

	if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
		IPN3KE_AFU_PMD_ERR("nbits is out of range");
		return;
	}

	/* Fill the partial head byte and any full bytes. */
	while (nbits - bits_to_set >= 0) {
		set = data >> (nbits - bits_to_set);
		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);

		nbits -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = 0xff;
		p++;
	}

	/* Fill the remaining bits of the tail byte. */
	if (nbits > 0) {
		uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);

		set = data << shift;
		mask_to_set = 0xff << shift;
		*p &= ~mask_to_set;
		*p |= (set & mask_to_set);
	}
}

static void
ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
	struct rte_flow *flow)
{
	uint32_t i, shift_bytes, len_in_bytes, offset;
	uint64_t key;
	uint8_t *dst;

	dst = flow->rule.key;

	copy_data_bits(dst,
			parser->key_type,
			IPN3KE_FLOW_KEY_ID_OFFSET,
			IPN3KE_FLOW_KEY_ID_BITS);

	/* The MSBs of the key are zero-filled when the key is shorter than
	 * IPN3KE_FLOW_KEY_DATA_BITS. The parsed key data is saved MSB byte
	 * first in the array, so the bits must be shifted into place before
	 * being formatted.
	 */
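	/* E.g. a 24-bit QinQ key starts at bit IPN3KE_FLOW_KEY_DATA_OFFSET +
	 * IPN3KE_FLOW_KEY_DATA_BITS - 24, right-aligned against the end of
	 * the key-data field.
	 */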
	key = 0;
	shift_bytes = 0;
	len_in_bytes = BITS_TO_BYTES(parser->key_len);
	offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
			IPN3KE_FLOW_KEY_DATA_BITS -
			parser->key_len);

	for (i = 0; i < len_in_bytes; i++) {
		key = (key << 8) | parser->key[i];

		if (++shift_bytes == sizeof(key)) {
			shift_bytes = 0;

			copy_data_bits(dst, key, offset,
					sizeof(key) * BITS_PER_BYTE);
			offset += sizeof(key) * BITS_PER_BYTE;
			key = 0;
		}
	}

	if (shift_bytes != 0) {
		uint32_t rem_bits;

		rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
		key >>= (shift_bytes * 8 - rem_bits);
		copy_data_bits(dst, key, offset, rem_bits);
	}
}

static void
ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
	struct rte_flow *flow)
{
	uint8_t *dst;

	if (parser->drop)
		return;

	dst = flow->rule.result;
	copy_data_bits(dst, 1,
			IPN3KE_FLOW_RESULT_ACL_OFFSET,
			IPN3KE_FLOW_RESULT_ACL_BITS);
	copy_data_bits(dst, parser->mark_id,
			IPN3KE_FLOW_RESULT_UID_OFFSET,
			IPN3KE_FLOW_RESULT_UID_BITS);
}

#define MHL_COMMAND_TIME_COUNT		0xFFFF
#define MHL_COMMAND_TIME_INTERVAL_US	10
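
/* Worst case, the busy polls below wait 0xFFFF iterations of 10 us each,
 * i.e. roughly 0.66 s, before a hardware command is abandoned.
 */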

static int
ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
	struct rte_flow *flow, uint32_t is_add)
{
	uint32_t *pdata = NULL;
	uint32_t data;
	uint32_t time_out = MHL_COMMAND_TIME_COUNT;
	uint32_t i;

	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");

	pdata = (uint32_t *)flow->rule.key;
	IPN3KE_AFU_PMD_DEBUG(" - key   :");

	for (i = 0; i < RTE_DIM(flow->rule.key); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);

	for (i = 0; i < 4; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
	IPN3KE_AFU_PMD_DEBUG("\n");

	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_AFU_PMD_DEBUG(" - result:");

	for (i = 0; i < RTE_DIM(flow->rule.result); i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);

	for (i = 0; i < 1; i++)
		IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
	IPN3KE_AFU_PMD_DEBUG("\n");
	IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");

	pdata = (uint32_t *)flow->rule.key;

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_0,
			0,
			ipn3ke_swap32(pdata[3]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_1,
			0,
			ipn3ke_swap32(pdata[2]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_2,
			0,
			ipn3ke_swap32(pdata[1]),
			IPN3KE_CLF_MHL_KEY_MASK);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_KEY_3,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_KEY_MASK);

	pdata = (uint32_t *)flow->rule.result;
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_RES,
			0,
			ipn3ke_swap32(pdata[0]),
			IPN3KE_CLF_MHL_RES_MASK);

	/* insert/delete the key and result */
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL, 0, 0x80000000);
	time_out = MHL_COMMAND_TIME_COUNT;
	while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
			(time_out > 0)) {
		data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL, 0, 0x80000000);
		time_out--;
		rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
	}
	if (!time_out)
		return -1;

	if (is_add)
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL, 0,
				IPN3KE_CLF_MHL_MGMT_CTRL_INSERT, 0x3);
	else
		IPN3KE_MASK_WRITE_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL, 0,
				IPN3KE_CLF_MHL_MGMT_CTRL_DELETE, 0x3);

	return 0;
}

static int
ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
{
	uint32_t data;
	uint32_t time_out = MHL_COMMAND_TIME_COUNT;

	/* flush the MHL lookup table */
	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL, 0, 0x80000000);
	time_out = MHL_COMMAND_TIME_COUNT;
	while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
			(time_out > 0)) {
		data = IPN3KE_MASK_READ_REG(hw,
				IPN3KE_CLF_MHL_MGMT_CTRL, 0, 0x80000000);
		time_out--;
		rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
	}
	if (!time_out)
		return -1;

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MGMT_CTRL, 0,
			IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH, 0x3);

	return 0;
}

static void
ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw,
	struct ipn3ke_flow_parse *parser, struct rte_flow *flow)
{
	ipn3ke_flow_key_generation(parser, flow);
	ipn3ke_flow_result_generation(parser, flow);
	ipn3ke_flow_hw_update(hw, flow, 1);
}

static int
ipn3ke_flow_convert(const struct rte_flow_attr *attr,
	const struct rte_flow_item items[],
	const struct rte_flow_action actions[], struct rte_flow_error *error,
	struct ipn3ke_flow_parse *parser)
{
	int ret;

	ret = ipn3ke_flow_convert_attributes(attr, error);
	if (ret)
		return ret;

	ret = ipn3ke_flow_convert_actions(actions, error, parser);
	if (ret)
		return ret;

	return ipn3ke_flow_convert_items(items, error, parser);
}

static int
ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev,
	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
	const struct rte_flow_action actions[], struct rte_flow_error *error)
{
	struct ipn3ke_flow_parse parser = {0};

	return ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
}

static struct rte_flow *
ipn3ke_flow_create(struct rte_eth_dev *dev,
	const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
	const struct rte_flow_action actions[], struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct ipn3ke_flow_parse parser = {0};
	struct rte_flow *flow;
	int ret;

	if (hw->flow_num_entries == hw->flow_max_entries) {
		rte_flow_error_set(error, ENOBUFS,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"The flow table is full.");
		return NULL;
	}

	ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
	if (ret < 0) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to create flow.");
		return NULL;
	}

	flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory");
		return NULL;
	}

	ipn3ke_flow_convert_finalise(hw, &parser, flow);

	TAILQ_INSERT_TAIL(&hw->flow_list, flow, next);

	return flow;
}

static int
ipn3ke_flow_destroy(struct rte_eth_dev *dev,
	struct rte_flow *flow, struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	int ret;

	ret = ipn3ke_flow_hw_update(hw, flow, 0);
	if (!ret) {
		TAILQ_REMOVE(&hw->flow_list, flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to destroy flow.");
	}

	return ret;
}

static int
ipn3ke_flow_flush(struct rte_eth_dev *dev,
	__rte_unused struct rte_flow_error *error)
{
	struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
	struct rte_flow *flow, *temp;

	TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) {
		TAILQ_REMOVE(&hw->flow_list, flow, next);
		rte_free(flow);
	}

	return ipn3ke_flow_hw_flush(hw);
}

int ipn3ke_flow_init(void *dev)
{
	struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
	uint32_t data;

	/* disable rx classifier bypass */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_TEST,
			0, 0, 0x1);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_RX_TEST, 0, 0x1);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);

	/* configure base mac address */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
			0, 0x2457, 0xFFFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_HI, 0, 0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);

	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
			0, 0x9bdf1000, 0xFFFFFFFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW, 0, 0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);

	/* configure hash lookup rules enable */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_LKUP_ENABLE,
			0, 0xFD, 0xFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_LKUP_ENABLE, 0, 0xFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);

	/* configure rx parse config, settings associated with VxLAN */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_RX_PARSE_CFG,
			0, 0x212b5, 0x3FFFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_RX_PARSE_CFG, 0, 0x3FFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);

	/* configure QinQ S-Tag (IEEE 802.1ad TPID 0x88a8) */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_QINQ_STAG,
			0, 0x88a8, 0xFFFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_QINQ_STAG, 0, 0xFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);

	/* configure gen ctrl */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_GEN_CTRL,
			0, 0x3, 0x3);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_GEN_CTRL, 0, 0x1F);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);

	/* clear monitoring register */
	IPN3KE_MASK_WRITE_REG(hw,
			IPN3KE_CLF_MHL_MON_0,
			0, 0xFFFFFFFF, 0xFFFFFFFF);

	data = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_MHL_MON_0, 0, 0xFFFFFFFF);
	IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);

	ipn3ke_flow_hw_flush(hw);

	TAILQ_INIT(&hw->flow_list);
	hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
			IPN3KE_CLF_EM_NUM, 0, 0xFFFFFFFF);
	hw->flow_num_entries = 0;

	return 0;
}

const struct rte_flow_ops ipn3ke_flow_ops = {
	.validate = ipn3ke_flow_validate,
	.create = ipn3ke_flow_create,
	.destroy = ipn3ke_flow_destroy,
	.flush = ipn3ke_flow_flush,
};