1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
14 #include <rte_debug.h>
15 #include <rte_ether.h>
16 #include <ethdev_driver.h>
18 #include <rte_malloc.h>
19 #include <rte_eth_ctrl.h>
20 #include <rte_tailq.h>
21 #include <rte_rawdev.h>
22 #include <rte_rawdev_pmd.h>
23 #include <rte_bus_ifpga.h>
24 #include <ifpga_common.h>
25 #include <ifpga_logs.h>
26 #include <ifpga_rawdev.h>
28 #include "ipn3ke_rawdev_api.h"
29 #include "ipn3ke_flow.h"
30 #include "ipn3ke_logs.h"
31 #include "ipn3ke_ethdev.h"
33 /** Static initializer for items. */
/* Builds an anonymous, END-terminated array of rte_flow item types from the
 * variadic argument list; used to declare each supported pattern template.
 * NOTE(review): the macro's closing tokens are not visible in this listing.
 */
34 #define FLOW_PATTERNS(...) \
35 ((const enum rte_flow_item_type []) { \
36 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
/* Hash-key layouts supported by the IPN3KE classifier; each enumerator is
 * used as the index into ipn3ke_supported_patterns[] below.
 * NOTE(review): the MAC/QINQ/MPLS enumerators referenced later are in the
 * elided lines of this listing.
 */
39 enum IPN3KE_HASH_KEY_TYPE {
40 IPN3KE_HASH_KEY_VXLAN,
44 IPN3KE_HASH_KEY_IP_TCP,
45 IPN3KE_HASH_KEY_IP_UDP,
46 IPN3KE_HASH_KEY_IP_NVGRE,
47 IPN3KE_HASH_KEY_VXLAN_IP_UDP,
/* Intermediate result of converting one rte_flow spec: the action state
 * (mark/drop), the selected hash-key type, and the key bytes collected
 * MSB-first by the per-pattern filter functions.
 */
50 struct ipn3ke_flow_parse {
51 uint32_t mark:1; /**< Set if the flow is marked. */
52 uint32_t drop:1; /**< ACL drop. */
53 uint32_t key_type:IPN3KE_FLOW_KEY_ID_BITS;
54 uint32_t mark_id:IPN3KE_FLOW_RESULT_UID_BITS; /**< Mark identifier. */
55 uint8_t key_len; /**< Length in bit. */
56 uint8_t key[BITS_TO_BYTES(IPN3KE_FLOW_KEY_DATA_BITS)];
/* Per-pattern parser callback: extracts key material from @patterns into
 * @parser; returns non-zero and fills @error on failure.
 */
60 typedef int (*pattern_filter_t)(const struct rte_flow_item patterns[],
61 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser);
/* One supported pattern: the expected item-type sequence (END-terminated)
 * and the filter that parses a matching item list into a hash key.
 */
64 struct ipn3ke_flow_pattern {
65 const enum rte_flow_item_type *const items;
67 pattern_filter_t filter;
/* Parse a VXLAN + inner-ETH pattern.
 * Hardware key layout (packed, MSB-first):
 */
72 * typedef struct packed {
73 * logic [47:0] vxlan_inner_mac;
74 * logic [23:0] vxlan_vni;
78 * RTE_FLOW_ITEM_TYPE_VXLAN
79 * RTE_FLOW_ITEM_TYPE_ETH
82 ipn3ke_pattern_vxlan(const struct rte_flow_item patterns[],
83 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
85 const struct rte_flow_item_vxlan *vxlan = NULL;
86 const struct rte_flow_item_eth *eth = NULL;
87 const struct rte_flow_item *item;
89 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Only 'last' is rejected here; the spec/mask checks are commented out,
 * unlike the other pattern parsers in this file.
 */
90 if (/*!item->spec || item->mask || */item->last) {
91 rte_flow_error_set(error,
93 RTE_FLOW_ERROR_TYPE_ITEM,
95 "Only support item with 'spec'");
/* key[0..5]: inner MAC; key[6..8]: 24-bit VNI. */
100 case RTE_FLOW_ITEM_TYPE_ETH:
103 rte_memcpy(&parser->key[0],
108 case RTE_FLOW_ITEM_TYPE_VXLAN:
111 rte_memcpy(&parser->key[6], vxlan->vni, 3);
115 rte_flow_error_set(error,
117 RTE_FLOW_ERROR_TYPE_ITEM,
119 "Not support item type");
/* Both items are mandatory; key is 48-bit MAC + 24-bit VNI. */
124 if (vxlan != NULL && eth != NULL) {
125 parser->key_len = 48 + 24;
129 rte_flow_error_set(error,
131 RTE_FLOW_ERROR_TYPE_ITEM,
133 "Missed some patterns");
/* Parse a single-ETH pattern keyed on the source MAC.
 * Hardware key layout (packed):
 */
139 * typedef struct packed {
140 * logic [47:0] eth_smac;
144 * RTE_FLOW_ITEM_TYPE_ETH
147 ipn3ke_pattern_mac(const struct rte_flow_item patterns[],
148 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
150 const struct rte_flow_item_eth *eth = NULL;
151 const struct rte_flow_item *item;
153 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Items must carry 'spec' only — no 'mask' or 'last'. */
154 if (!item->spec || item->mask || item->last) {
155 rte_flow_error_set(error,
157 RTE_FLOW_ERROR_TYPE_ITEM,
159 "Only support item with 'spec'");
163 switch (item->type) {
164 case RTE_FLOW_ITEM_TYPE_ETH:
167 rte_memcpy(parser->key,
173 rte_flow_error_set(error,
175 RTE_FLOW_ERROR_TYPE_ITEM,
177 "Not support item type");
/* 48-bit key: the source MAC address. */
183 parser->key_len = 48;
187 rte_flow_error_set(error,
189 RTE_FLOW_ERROR_TYPE_ITEM,
191 "Missed some patterns");
/* Parse a QinQ (double VLAN) pattern: outer then inner VLAN ID, 12 bits
 * each, packed back-to-back. Hardware key layout:
 */
197 * typedef struct packed {
198 * logic [11:0] outer_vlan_id;
199 * logic [11:0] inner_vlan_id;
203 * RTE_FLOW_ITEM_TYPE_VLAN
204 * RTE_FLOW_ITEM_TYPE_VLAN
207 ipn3ke_pattern_qinq(const struct rte_flow_item patterns[],
208 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
210 const struct rte_flow_item_vlan *outer_vlan = NULL;
211 const struct rte_flow_item_vlan *inner_vlan = NULL;
212 const struct rte_flow_item *item;
215 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
216 if (!item->spec || item->mask || item->last) {
217 rte_flow_error_set(error,
219 RTE_FLOW_ERROR_TYPE_ITEM,
221 "Only support item with 'spec'");
225 switch (item->type) {
226 case RTE_FLOW_ITEM_TYPE_VLAN:
/* First VLAN item = outer tag: 12-bit VID straddles key[0] and the
 * high nibble of key[1].
 */
228 outer_vlan = item->spec;
230 tci = rte_be_to_cpu_16(outer_vlan->tci);
231 parser->key[0] = (tci & 0xff0) >> 4;
232 parser->key[1] |= (tci & 0x00f) << 4;
/* Second VLAN item = inner tag: low nibble of key[1] plus key[2]. */
234 inner_vlan = item->spec;
236 tci = rte_be_to_cpu_16(inner_vlan->tci);
237 parser->key[1] |= (tci & 0xf00) >> 8;
238 parser->key[2] = (tci & 0x0ff);
243 rte_flow_error_set(error,
245 RTE_FLOW_ERROR_TYPE_ITEM,
247 "Not support item type");
252 if (outer_vlan != NULL && inner_vlan != NULL) {
253 parser->key_len = 12 + 12;
257 rte_flow_error_set(error,
259 RTE_FLOW_ERROR_TYPE_ITEM,
261 "Missed some patterns");
/* Parse a double-MPLS pattern: two 20-bit labels packed back-to-back.
 * Hardware key layout:
 */
267 * typedef struct packed {
268 * logic [19:0] mpls_label1;
269 * logic [19:0] mpls_label2;
273 * RTE_FLOW_ITEM_TYPE_MPLS
274 * RTE_FLOW_ITEM_TYPE_MPLS
277 ipn3ke_pattern_mpls(const struct rte_flow_item patterns[],
278 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
280 const struct rte_flow_item_mpls *mpls1 = NULL;
281 const struct rte_flow_item_mpls *mpls2 = NULL;
282 const struct rte_flow_item *item;
284 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
285 if (!item->spec || item->mask || item->last) {
286 rte_flow_error_set(error,
288 RTE_FLOW_ERROR_TYPE_ITEM,
290 "Only support item with 'spec'");
294 switch (item->type) {
295 case RTE_FLOW_ITEM_TYPE_MPLS:
/* First label: top 20 bits of label_tc_s go into key[0..2]
 * (high nibble of key[2]).
 */
299 parser->key[0] = mpls1->label_tc_s[0];
300 parser->key[1] = mpls1->label_tc_s[1];
301 parser->key[2] = mpls1->label_tc_s[2] & 0xf0;
/* Second label: shifted by 4 bits to butt against the first. */
306 ((mpls2->label_tc_s[0] & 0xf0) >> 4);
308 ((mpls2->label_tc_s[0] & 0xf) << 4) |
309 ((mpls2->label_tc_s[1] & 0xf0) >> 4);
311 ((mpls2->label_tc_s[1] & 0xf) << 4) |
312 ((mpls2->label_tc_s[2] & 0xf0) >> 4);
317 rte_flow_error_set(error,
319 RTE_FLOW_ERROR_TYPE_ITEM,
321 "Not support item type");
326 if (mpls1 != NULL && mpls2 != NULL) {
327 parser->key_len = 20 + 20;
331 rte_flow_error_set(error,
333 RTE_FLOW_ERROR_TYPE_ITEM,
335 "Missed some patterns");
/* Parse an IPv4 + TCP pattern keyed on source address and source port.
 * Hardware key layout:
 */
341 * typedef struct packed {
342 * logic [31:0] ip_sa;
343 * logic [15:0] tcp_sport;
344 * } Hash_Key_Ip_Tcp_t;
347 * RTE_FLOW_ITEM_TYPE_IPV4
348 * RTE_FLOW_ITEM_TYPE_TCP
351 ipn3ke_pattern_ip_tcp(const struct rte_flow_item patterns[],
352 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
354 const struct rte_flow_item_ipv4 *ipv4 = NULL;
355 const struct rte_flow_item_tcp *tcp = NULL;
356 const struct rte_flow_item *item;
358 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
359 if (!item->spec || item->mask || item->last) {
360 rte_flow_error_set(error,
362 RTE_FLOW_ERROR_TYPE_ITEM,
364 "Only support item with 'spec'");
368 switch (item->type) {
369 case RTE_FLOW_ITEM_TYPE_IPV4:
/* key[0..3]: IPv4 source address (kept in network byte order). */
372 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
375 case RTE_FLOW_ITEM_TYPE_TCP:
/* key[4..5]: TCP source port. */
378 rte_memcpy(&parser->key[4], &tcp->hdr.src_port, 2);
382 rte_flow_error_set(error,
384 RTE_FLOW_ERROR_TYPE_ITEM,
386 "Not support item type");
391 if (ipv4 != NULL && tcp != NULL) {
392 parser->key_len = 32 + 16;
396 rte_flow_error_set(error,
398 RTE_FLOW_ERROR_TYPE_ITEM,
400 "Missed some patterns");
/* Parse an IPv4 + UDP pattern keyed on source address and source port.
 * Hardware key layout:
 */
406 * typedef struct packed {
407 * logic [31:0] ip_sa;
408 * logic [15:0] udp_sport;
409 * } Hash_Key_Ip_Udp_t;
412 * RTE_FLOW_ITEM_TYPE_IPV4
413 * RTE_FLOW_ITEM_TYPE_UDP
416 ipn3ke_pattern_ip_udp(const struct rte_flow_item patterns[],
417 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
419 const struct rte_flow_item_ipv4 *ipv4 = NULL;
420 const struct rte_flow_item_udp *udp = NULL;
421 const struct rte_flow_item *item;
423 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
424 if (!item->spec || item->mask || item->last) {
425 rte_flow_error_set(error,
427 RTE_FLOW_ERROR_TYPE_ITEM,
429 "Only support item with 'spec'");
433 switch (item->type) {
434 case RTE_FLOW_ITEM_TYPE_IPV4:
/* key[0..3]: IPv4 source address. */
437 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
440 case RTE_FLOW_ITEM_TYPE_UDP:
/* key[4..5]: UDP source port. */
443 rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
447 rte_flow_error_set(error,
449 RTE_FLOW_ERROR_TYPE_ITEM,
451 "Not support item type");
456 if (ipv4 != NULL && udp != NULL) {
457 parser->key_len = 32 + 16;
461 rte_flow_error_set(error,
463 RTE_FLOW_ERROR_TYPE_ITEM,
465 "Missed some patterns");
/* Parse an IPv4 + UDP + NVGRE pattern: source address, source port and the
 * 24-bit TNI. Hardware key layout:
 */
471 * typedef struct packed {
472 * logic [31:0] ip_sa;
473 * logic [15:0] udp_sport;
475 * } Hash_Key_Ip_Nvgre_t;
478 * RTE_FLOW_ITEM_TYPE_IPV4
479 * RTE_FLOW_ITEM_TYPE_UDP
480 * RTE_FLOW_ITEM_TYPE_NVGRE
483 ipn3ke_pattern_ip_nvgre(const struct rte_flow_item patterns[],
484 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
486 const struct rte_flow_item_nvgre *nvgre = NULL;
487 const struct rte_flow_item_ipv4 *ipv4 = NULL;
488 const struct rte_flow_item_udp *udp = NULL;
489 const struct rte_flow_item *item;
491 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
492 if (!item->spec || item->mask || item->last) {
493 rte_flow_error_set(error,
495 RTE_FLOW_ERROR_TYPE_ITEM,
497 "Only support item with 'spec'");
501 switch (item->type) {
502 case RTE_FLOW_ITEM_TYPE_IPV4:
505 rte_memcpy(&parser->key[0], &ipv4->hdr.src_addr, 4);
508 case RTE_FLOW_ITEM_TYPE_UDP:
511 rte_memcpy(&parser->key[4], &udp->hdr.src_port, 2);
514 case RTE_FLOW_ITEM_TYPE_NVGRE:
/* key[6..8]: 24-bit NVGRE tenant network ID. */
517 rte_memcpy(&parser->key[6], nvgre->tni, 3);
521 rte_flow_error_set(error,
523 RTE_FLOW_ERROR_TYPE_ITEM,
525 "Not support item type");
530 if (ipv4 != NULL && udp != NULL && nvgre != NULL) {
531 parser->key_len = 32 + 16 + 24;
535 rte_flow_error_set(error,
537 RTE_FLOW_ERROR_TYPE_ITEM,
539 "Missed some patterns");
/* Parse a VXLAN + inner IPv4 + inner UDP pattern: VNI, then source address,
 * then source port. Hardware key layout:
 */
545 * typedef struct packed{
546 * logic [23:0] vxlan_vni;
547 * logic [31:0] ip_sa;
548 * logic [15:0] udp_sport;
549 * } Hash_Key_Vxlan_Ip_Udp_t;
552 * RTE_FLOW_ITEM_TYPE_VXLAN
553 * RTE_FLOW_ITEM_TYPE_IPV4
554 * RTE_FLOW_ITEM_TYPE_UDP
557 ipn3ke_pattern_vxlan_ip_udp(const struct rte_flow_item patterns[],
558 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
560 const struct rte_flow_item_vxlan *vxlan = NULL;
561 const struct rte_flow_item_ipv4 *ipv4 = NULL;
562 const struct rte_flow_item_udp *udp = NULL;
563 const struct rte_flow_item *item;
565 for (item = patterns; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
566 if (!item->spec || item->mask || item->last) {
567 rte_flow_error_set(error,
569 RTE_FLOW_ERROR_TYPE_ITEM,
571 "Only support item with 'spec'");
575 switch (item->type) {
576 case RTE_FLOW_ITEM_TYPE_VXLAN:
/* key[0..2]: VNI; key[3..6]: IPv4 source; key[7..8]: UDP sport. */
579 rte_memcpy(&parser->key[0], vxlan->vni, 3);
582 case RTE_FLOW_ITEM_TYPE_IPV4:
585 rte_memcpy(&parser->key[3], &ipv4->hdr.src_addr, 4);
588 case RTE_FLOW_ITEM_TYPE_UDP:
591 rte_memcpy(&parser->key[7], &udp->hdr.src_port, 2);
595 rte_flow_error_set(error,
597 RTE_FLOW_ERROR_TYPE_ITEM,
599 "Not support item type");
604 if (vxlan != NULL && ipv4 != NULL && udp != NULL) {
605 parser->key_len = 24 + 32 + 16;
609 rte_flow_error_set(error,
611 RTE_FLOW_ERROR_TYPE_ITEM,
613 "Missed some patterns");
/* Pattern-template table, indexed by enum IPN3KE_HASH_KEY_TYPE. Each entry
 * pairs an exact END-terminated item-type sequence with the filter that
 * parses it; ipn3ke_find_filter_func() scans this table in order.
 */
617 static const struct ipn3ke_flow_pattern ipn3ke_supported_patterns[] = {
618 [IPN3KE_HASH_KEY_VXLAN] = {
619 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
620 RTE_FLOW_ITEM_TYPE_ETH),
621 .filter = ipn3ke_pattern_vxlan,
624 [IPN3KE_HASH_KEY_MAC] = {
625 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_ETH),
626 .filter = ipn3ke_pattern_mac,
629 [IPN3KE_HASH_KEY_QINQ] = {
630 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VLAN,
631 RTE_FLOW_ITEM_TYPE_VLAN),
632 .filter = ipn3ke_pattern_qinq,
635 [IPN3KE_HASH_KEY_MPLS] = {
636 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_MPLS,
637 RTE_FLOW_ITEM_TYPE_MPLS),
638 .filter = ipn3ke_pattern_mpls,
641 [IPN3KE_HASH_KEY_IP_TCP] = {
642 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
643 RTE_FLOW_ITEM_TYPE_TCP),
644 .filter = ipn3ke_pattern_ip_tcp,
647 [IPN3KE_HASH_KEY_IP_UDP] = {
648 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
649 RTE_FLOW_ITEM_TYPE_UDP),
650 .filter = ipn3ke_pattern_ip_udp,
653 [IPN3KE_HASH_KEY_IP_NVGRE] = {
654 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_IPV4,
655 RTE_FLOW_ITEM_TYPE_UDP,
656 RTE_FLOW_ITEM_TYPE_NVGRE),
657 .filter = ipn3ke_pattern_ip_nvgre,
660 [IPN3KE_HASH_KEY_VXLAN_IP_UDP] = {
661 .items = FLOW_PATTERNS(RTE_FLOW_ITEM_TYPE_VXLAN,
662 RTE_FLOW_ITEM_TYPE_IPV4,
663 RTE_FLOW_ITEM_TYPE_UDP),
664 .filter = ipn3ke_pattern_vxlan_ip_udp,
/* Validate rte_flow attributes: rejects groups, egress and transfer flows,
 * and requires ingress — only simple ingress rules are supported.
 */
669 ipn3ke_flow_convert_attributes(const struct rte_flow_attr *attr,
670 struct rte_flow_error *error)
673 rte_flow_error_set(error,
675 RTE_FLOW_ERROR_TYPE_ATTR,
682 rte_flow_error_set(error,
684 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
686 "groups are not supported");
691 rte_flow_error_set(error,
693 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
695 "egress is not supported");
699 if (attr->transfer) {
700 rte_flow_error_set(error,
702 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
704 "transfer is not supported");
708 if (!attr->ingress) {
709 rte_flow_error_set(error,
711 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
713 "only ingress is supported");
/* Walk the action list and record the outcome in @parser: MARK (with a
 * bounded id) and/or DROP; VOID is ignored. At least one of mark/drop must
 * be present, and a MARK id must fit IPN3KE_FLOW_RESULT_UID_MAX.
 */
721 ipn3ke_flow_convert_actions(const struct rte_flow_action actions[],
722 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
724 const struct rte_flow_action_mark *mark = NULL;
727 rte_flow_error_set(error,
729 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
735 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
736 switch (actions->type) {
737 case RTE_FLOW_ACTION_TYPE_VOID:
740 case RTE_FLOW_ACTION_TYPE_MARK:
/* A second MARK in the same rule is rejected (error in the
 * elided branch).
 */
742 rte_flow_error_set(error,
744 RTE_FLOW_ERROR_TYPE_ACTION,
750 mark = actions->conf;
752 rte_flow_error_set(error,
754 RTE_FLOW_ERROR_TYPE_ACTION,
756 "mark must be defined");
758 } else if (mark->id > IPN3KE_FLOW_RESULT_UID_MAX) {
759 rte_flow_error_set(error,
761 RTE_FLOW_ERROR_TYPE_ACTION,
763 "mark id is out of range");
768 parser->mark_id = mark->id;
771 case RTE_FLOW_ACTION_TYPE_DROP:
776 rte_flow_error_set(error,
778 RTE_FLOW_ERROR_TYPE_ACTION,
785 if (!parser->drop && !parser->mark) {
786 rte_flow_error_set(error,
788 RTE_FLOW_ERROR_TYPE_ACTION,
/* Compare an input item list against one pattern template. Returns true
 * only when both sequences end together, i.e. the item types match the
 * template exactly with no extras on either side.
 */
798 ipn3ke_match_pattern(const enum rte_flow_item_type *patterns,
799 const struct rte_flow_item *input)
801 const struct rte_flow_item *item = input;
803 while ((*patterns == item->type) &&
804 (*patterns != RTE_FLOW_ITEM_TYPE_END)) {
809 return (*patterns == RTE_FLOW_ITEM_TYPE_END &&
810 item->type == RTE_FLOW_ITEM_TYPE_END);
/* Linear-scan ipn3ke_supported_patterns[] for a template matching @input;
 * returns the associated filter (and, via the elided out-parameter, the
 * matching index used as the hash-key type), or NULL if none matches.
 */
813 static pattern_filter_t
814 ipn3ke_find_filter_func(const struct rte_flow_item *input,
817 pattern_filter_t filter = NULL;
820 for (i = 0; i < RTE_DIM(ipn3ke_supported_patterns); i++) {
821 if (ipn3ke_match_pattern(ipn3ke_supported_patterns[i].items,
823 filter = ipn3ke_supported_patterns[i].filter;
/* Resolve the item list to a supported pattern, record the key type in
 * @parser, then delegate key extraction to that pattern's filter.
 */
833 ipn3ke_flow_convert_items(const struct rte_flow_item items[],
834 struct rte_flow_error *error, struct ipn3ke_flow_parse *parser)
836 pattern_filter_t filter = NULL;
840 rte_flow_error_set(error,
842 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
848 filter = ipn3ke_find_filter_func(items, &idx);
851 rte_flow_error_set(error,
853 RTE_FLOW_ERROR_TYPE_ITEM,
855 "Unsupported pattern");
/* idx doubles as the hash-key type (table index). */
859 parser->key_type = idx;
861 return filter(items, error, parser);
864 /* Put the least @nbits of @data into @offset of @dst bits stream, and
865 * the @offset starts from MSB to LSB in each byte.
868 * +------+------+------+------+
870 * +------+------+------+------+
872 * |<- data: nbits ->|
877 copy_data_bits(uint8_t *dst, uint64_t data,
878 uint32_t offset, uint8_t nbits)
/* p: first destination byte; bits_to_set/mask_to_set describe the free
 * low portion of that byte after the bit offset within it.
 */
880 uint8_t set, *p = &dst[offset / BITS_PER_BYTE];
881 uint8_t bits_to_set = BITS_PER_BYTE - (offset % BITS_PER_BYTE);
882 uint8_t mask_to_set = 0xff >> (offset % BITS_PER_BYTE);
883 uint32_t size = offset + nbits;
885 if (nbits > (sizeof(data) * BITS_PER_BYTE)) {
886 IPN3KE_AFU_PMD_ERR("nbits is out of range");
/* Whole-byte-at-a-time copy; operands promote to int, so the
 * subtraction below is signed despite the uint8_t declarations.
 */
890 while (nbits - bits_to_set >= 0) {
891 set = data >> (nbits - bits_to_set);
894 *p |= (set & mask_to_set);
896 nbits -= bits_to_set;
897 bits_to_set = BITS_PER_BYTE;
/* Tail: fewer than 8 bits left — mask them into the top of the byte. */
903 uint8_t shift = BITS_PER_BYTE - (size % BITS_PER_BYTE);
906 mask_to_set = 0xff << shift;
909 *p |= (set & mask_to_set);
/* Serialize the parsed key into flow->rule.key: first the key-type id at
 * IPN3KE_FLOW_KEY_ID_OFFSET, then the key data right-aligned within the
 * IPN3KE_FLOW_KEY_DATA_BITS field, streamed 64 bits at a time.
 */
914 ipn3ke_flow_key_generation(struct ipn3ke_flow_parse *parser,
915 struct rte_flow *flow)
917 uint32_t i, shift_bytes, len_in_bytes, offset;
921 dst = flow->rule.key;
925 IPN3KE_FLOW_KEY_ID_OFFSET,
926 IPN3KE_FLOW_KEY_ID_BITS);
928 /* The MSb of key is filled to 0 when it is less than
929 * IPN3KE_FLOW_KEY_DATA_BITS bit. And the parsed key data is
930 * save as MSB byte first in the array, it needs to move
931 * the bits before formatting them.
935 len_in_bytes = BITS_TO_BYTES(parser->key_len);
/* Right-align: start writing where the key's own MSB belongs. */
936 offset = (IPN3KE_FLOW_KEY_DATA_OFFSET +
937 IPN3KE_FLOW_KEY_DATA_BITS -
940 for (i = 0; i < len_in_bytes; i++) {
941 key = (key << 8) | parser->key[i];
/* Flush a full 64-bit accumulator to the destination stream. */
943 if (++shift_bytes == sizeof(key)) {
946 copy_data_bits(dst, key, offset,
947 sizeof(key) * BITS_PER_BYTE);
948 offset += sizeof(key) * BITS_PER_BYTE;
/* Remaining partial accumulator: drop the pad bits, then emit. */
953 if (shift_bytes != 0) {
956 rem_bits = parser->key_len % (sizeof(key) * BITS_PER_BYTE);
957 key >>= (shift_bytes * 8 - rem_bits);
958 copy_data_bits(dst, key, offset, rem_bits);
/* Serialize the parsed action into flow->rule.result: the ACL (drop) bit
 * and the mark UID, each at its fixed bit offset.
 */
963 ipn3ke_flow_result_generation(struct ipn3ke_flow_parse *parser,
964 struct rte_flow *flow)
971 dst = flow->rule.result;
975 IPN3KE_FLOW_RESULT_ACL_OFFSET,
976 IPN3KE_FLOW_RESULT_ACL_BITS);
980 IPN3KE_FLOW_RESULT_UID_OFFSET,
981 IPN3KE_FLOW_RESULT_UID_BITS);
/* Busy-wait budget for MHL (hash-lookup) commands: up to 0xFFFF polls,
 * 10 us apart.
 */
984 #define MHL_COMMAND_TIME_COUNT 0xFFFF
985 #define MHL_COMMAND_TIME_INTERVAL_US 10
/* Program one rule into the hardware MHL table: dump it for debug, write
 * the four key words and the result word, wait for the BUSY bit to clear,
 * then issue INSERT (@is_add != 0) or DELETE.
 */
988 ipn3ke_flow_hw_update(struct ipn3ke_hw *hw,
989 struct rte_flow *flow, uint32_t is_add)
991 uint32_t *pdata = NULL;
993 uint32_t time_out = MHL_COMMAND_TIME_COUNT;
996 IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump start\n");
998 pdata = (uint32_t *)flow->rule.key;
999 IPN3KE_AFU_PMD_DEBUG(" - key :");
1001 for (i = 0; i < RTE_DIM(flow->rule.key); i++)
1002 IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.key[i]);
1004 for (i = 0; i < 4; i++)
1005 IPN3KE_AFU_PMD_DEBUG(" %02x", ipn3ke_swap32(pdata[3 - i]));
1006 IPN3KE_AFU_PMD_DEBUG("\n");
1008 pdata = (uint32_t *)flow->rule.result;
1009 IPN3KE_AFU_PMD_DEBUG(" - result:");
1011 for (i = 0; i < RTE_DIM(flow->rule.result); i++)
1012 IPN3KE_AFU_PMD_DEBUG(" %02x", flow->rule.result[i]);
1014 for (i = 0; i < 1; i++)
1015 IPN3KE_AFU_PMD_DEBUG(" %02x", pdata[i]);
1016 IPN3KE_AFU_PMD_DEBUG("IPN3KE flow dump end\n");
/* Key words are written highest-index first, byte-swapped — the register
 * interface apparently expects the reversed word order (TODO confirm
 * against the MHL register spec).
 */
1018 pdata = (uint32_t *)flow->rule.key;
1020 IPN3KE_MASK_WRITE_REG(hw,
1021 IPN3KE_CLF_MHL_KEY_0,
1023 ipn3ke_swap32(pdata[3]),
1024 IPN3KE_CLF_MHL_KEY_MASK);
1026 IPN3KE_MASK_WRITE_REG(hw,
1027 IPN3KE_CLF_MHL_KEY_1,
1029 ipn3ke_swap32(pdata[2]),
1030 IPN3KE_CLF_MHL_KEY_MASK);
1032 IPN3KE_MASK_WRITE_REG(hw,
1033 IPN3KE_CLF_MHL_KEY_2,
1035 ipn3ke_swap32(pdata[1]),
1036 IPN3KE_CLF_MHL_KEY_MASK);
1038 IPN3KE_MASK_WRITE_REG(hw,
1039 IPN3KE_CLF_MHL_KEY_3,
1041 ipn3ke_swap32(pdata[0]),
1042 IPN3KE_CLF_MHL_KEY_MASK);
1044 pdata = (uint32_t *)flow->rule.result;
1045 IPN3KE_MASK_WRITE_REG(hw,
1048 ipn3ke_swap32(pdata[0]),
1049 IPN3KE_CLF_MHL_RES_MASK);
1051 /* insert/delete the key and result */
1053 data = IPN3KE_MASK_READ_REG(hw,
1054 IPN3KE_CLF_MHL_MGMT_CTRL,
/* Poll the management-control BUSY bit with a bounded timeout. */
1057 time_out = MHL_COMMAND_TIME_COUNT;
1058 while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
1060 data = IPN3KE_MASK_READ_REG(hw,
1061 IPN3KE_CLF_MHL_MGMT_CTRL,
1065 rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
1070 IPN3KE_MASK_WRITE_REG(hw,
1071 IPN3KE_CLF_MHL_MGMT_CTRL,
1073 IPN3KE_CLF_MHL_MGMT_CTRL_INSERT,
1076 IPN3KE_MASK_WRITE_REG(hw,
1077 IPN3KE_CLF_MHL_MGMT_CTRL,
1079 IPN3KE_CLF_MHL_MGMT_CTRL_DELETE,
/* Flush the entire hardware MHL lookup table: wait (bounded) for the BUSY
 * bit to clear, then issue the FLUSH command.
 */
1086 ipn3ke_flow_hw_flush(struct ipn3ke_hw *hw)
1089 uint32_t time_out = MHL_COMMAND_TIME_COUNT;
1091 /* flush the MHL lookup table */
1093 data = IPN3KE_MASK_READ_REG(hw,
1094 IPN3KE_CLF_MHL_MGMT_CTRL,
1097 time_out = MHL_COMMAND_TIME_COUNT;
1098 while (IPN3KE_BIT_ISSET(data, IPN3KE_CLF_MHL_MGMT_CTRL_BIT_BUSY) &&
1100 data = IPN3KE_MASK_READ_REG(hw,
1101 IPN3KE_CLF_MHL_MGMT_CTRL,
1105 rte_delay_us(MHL_COMMAND_TIME_INTERVAL_US);
1109 IPN3KE_MASK_WRITE_REG(hw,
1110 IPN3KE_CLF_MHL_MGMT_CTRL,
1112 IPN3KE_CLF_MHL_MGMT_CTRL_FLUSH,
/* Final stage of flow creation: serialize key and result from the parser
 * into @flow, then program the rule into hardware (is_add = 1).
 */
1119 ipn3ke_flow_convert_finalise(struct ipn3ke_hw *hw,
1120 struct ipn3ke_flow_parse *parser, struct rte_flow *flow)
1122 ipn3ke_flow_key_generation(parser, flow);
1123 ipn3ke_flow_result_generation(parser, flow);
1124 ipn3ke_flow_hw_update(hw, flow, 1);
/* Full software-side conversion pipeline: validate attributes, then
 * actions, then items — stopping at the first stage that fails.
 */
1128 ipn3ke_flow_convert(const struct rte_flow_attr *attr,
1129 const struct rte_flow_item items[],
1130 const struct rte_flow_action actions[], struct rte_flow_error *error,
1131 struct ipn3ke_flow_parse *parser)
1135 ret = ipn3ke_flow_convert_attributes(attr, error);
1139 ret = ipn3ke_flow_convert_actions(actions, error, parser);
1143 ret = ipn3ke_flow_convert_items(items, error, parser);
/* rte_flow_ops.validate: dry-run the conversion into a throwaway parser —
 * no hardware state is touched.
 */
1151 ipn3ke_flow_validate(__rte_unused struct rte_eth_dev *dev,
1152 const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
1153 const struct rte_flow_action actions[], struct rte_flow_error *error)
1155 struct ipn3ke_flow_parse parser = {0};
1156 return ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
/* rte_flow_ops.create: check table capacity, convert the rule, allocate a
 * flow object, program it into hardware and append it to hw->flow_list.
 * Returns the new flow, or NULL with @error set.
 */
1159 static struct rte_flow *
1160 ipn3ke_flow_create(struct rte_eth_dev *dev,
1161 const struct rte_flow_attr *attr, const struct rte_flow_item pattern[],
1162 const struct rte_flow_action actions[], struct rte_flow_error *error)
1164 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1165 struct ipn3ke_flow_parse parser = {0};
1166 struct rte_flow *flow;
1169 if (hw->flow_num_entries == hw->flow_max_entries) {
1170 rte_flow_error_set(error,
1172 RTE_FLOW_ERROR_TYPE_HANDLE,
1174 "The flow table is full.");
1178 ret = ipn3ke_flow_convert(attr, pattern, actions, error, &parser);
1180 rte_flow_error_set(error,
1182 RTE_FLOW_ERROR_TYPE_HANDLE,
1184 "Failed to create flow.");
1188 flow = rte_zmalloc("ipn3ke_flow", sizeof(struct rte_flow), 0);
1190 rte_flow_error_set(error,
1192 RTE_FLOW_ERROR_TYPE_HANDLE,
1194 "Failed to allocate memory");
1198 ipn3ke_flow_convert_finalise(hw, &parser, flow);
1200 TAILQ_INSERT_TAIL(&hw->flow_list, flow, next);
/* rte_flow_ops.destroy: remove the rule from hardware (is_add = 0), unlink
 * it from hw->flow_list, and report failure via @error if the hardware
 * update did not succeed.
 */
1206 ipn3ke_flow_destroy(struct rte_eth_dev *dev,
1207 struct rte_flow *flow, struct rte_flow_error *error)
1209 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1212 ret = ipn3ke_flow_hw_update(hw, flow, 0);
1214 TAILQ_REMOVE(&hw->flow_list, flow, next);
1217 rte_flow_error_set(error,
1219 RTE_FLOW_ERROR_TYPE_HANDLE,
1221 "Failed to destroy flow.");
/* rte_flow_ops.flush: free every flow on the software list (safe-iteration
 * since entries are removed while walking), then flush the hardware table
 * in one command.
 */
1228 ipn3ke_flow_flush(struct rte_eth_dev *dev,
1229 __rte_unused struct rte_flow_error *error)
1231 struct ipn3ke_hw *hw = IPN3KE_DEV_PRIVATE_TO_HW(dev);
1232 struct rte_flow *flow, *temp;
1234 RTE_TAILQ_FOREACH_SAFE(flow, &hw->flow_list, next, temp) {
1235 TAILQ_REMOVE(&hw->flow_list, flow, next);
1239 return ipn3ke_flow_hw_flush(hw);
/* One-time classifier bring-up: program each configuration register (bypass
 * off, base MAC, lookup enables, parser config, QinQ S-Tag, MHL general
 * control, monitor clear), reading each back for debug logging, then flush
 * the hardware table and initialize the software flow list and capacity.
 * NOTE(review): the written register values are in lines elided from this
 * listing.
 */
1242 int ipn3ke_flow_init(void *dev)
1244 struct ipn3ke_hw *hw = (struct ipn3ke_hw *)dev;
1247 /* disable rx classifier bypass */
1248 IPN3KE_MASK_WRITE_REG(hw,
1253 data = IPN3KE_MASK_READ_REG(hw,
1257 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_TEST: %x\n", data);
1259 /* configure base mac address */
1260 IPN3KE_MASK_WRITE_REG(hw,
1261 IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
1267 data = IPN3KE_MASK_READ_REG(hw,
1268 IPN3KE_CLF_BASE_DST_MAC_ADDR_HI,
1271 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_HI: %x\n", data);
1273 IPN3KE_MASK_WRITE_REG(hw,
1274 IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
1280 data = IPN3KE_MASK_READ_REG(hw,
1281 IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW,
1284 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_BASE_DST_MAC_ADDR_LOW: %x\n", data);
1287 /* configure hash lookup rules enable */
1288 IPN3KE_MASK_WRITE_REG(hw,
1289 IPN3KE_CLF_LKUP_ENABLE,
1295 data = IPN3KE_MASK_READ_REG(hw,
1296 IPN3KE_CLF_LKUP_ENABLE,
1299 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
1302 /* configure rx parse config, settings associatied with VxLAN */
1303 IPN3KE_MASK_WRITE_REG(hw,
1304 IPN3KE_CLF_RX_PARSE_CFG,
1310 data = IPN3KE_MASK_READ_REG(hw,
1311 IPN3KE_CLF_RX_PARSE_CFG,
1314 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_RX_PARSE_CFG: %x\n", data);
1317 /* configure QinQ S-Tag */
1318 IPN3KE_MASK_WRITE_REG(hw,
1319 IPN3KE_CLF_QINQ_STAG,
1325 data = IPN3KE_MASK_READ_REG(hw,
1326 IPN3KE_CLF_QINQ_STAG,
1329 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_QINQ_STAG: %x\n", data);
1332 /* configure gen ctrl */
1333 IPN3KE_MASK_WRITE_REG(hw,
1334 IPN3KE_CLF_MHL_GEN_CTRL,
1340 data = IPN3KE_MASK_READ_REG(hw,
1341 IPN3KE_CLF_MHL_GEN_CTRL,
1344 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_GEN_CTRL: %x\n", data);
1347 /* clear monitoring register */
1348 IPN3KE_MASK_WRITE_REG(hw,
1349 IPN3KE_CLF_MHL_MON_0,
1355 data = IPN3KE_MASK_READ_REG(hw,
1356 IPN3KE_CLF_MHL_MON_0,
1359 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_MHL_MON_0: %x\n", data);
1362 ipn3ke_flow_hw_flush(hw);
/* Table capacity is read from hardware; entry count starts at zero. */
1364 TAILQ_INIT(&hw->flow_list);
1365 hw->flow_max_entries = IPN3KE_MASK_READ_REG(hw,
1369 IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_EN_NUM: %x\n", hw->flow_max_entries);
1370 hw->flow_num_entries = 0;
/* rte_flow driver ops exported to the ethdev layer for this PMD. */
1375 const struct rte_flow_ops ipn3ke_flow_ops = {
1376 .validate = ipn3ke_flow_validate,
1377 .create = ipn3ke_flow_create,
1378 .destroy = ipn3ke_flow_destroy,
1379 .flush = ipn3ke_flow_flush,