+ char mask[64];
+ int i, size = 0;
+ const char *mask_support = 0;
+
+ switch (type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ mask_support = (const char *)&dpaa2_flow_item_eth_mask;
+ size = sizeof(struct rte_flow_item_eth);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
+ size = sizeof(struct rte_flow_item_vlan);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
+ size = sizeof(struct rte_flow_item_ipv4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
+ size = sizeof(struct rte_flow_item_ipv6);
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
+ size = sizeof(struct rte_flow_item_icmp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ mask_support = (const char *)&dpaa2_flow_item_udp_mask;
+ size = sizeof(struct rte_flow_item_udp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
+ size = sizeof(struct rte_flow_item_tcp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
+ size = sizeof(struct rte_flow_item_sctp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ mask_support = (const char *)&dpaa2_flow_item_gre_mask;
+ size = sizeof(struct rte_flow_item_gre);
+ break;
+ default:
+ return -1;
+ }
+
+ memcpy(mask, mask_support, size);
+
+ for (i = 0; i < size; i++)
+ mask[i] = (mask[i] | mask_src[i]);
+
+ if (memcmp(mask, mask_support, size))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Build QoS and FS (per-TC) classification rules for an ETH pattern item.
+ *
+ * For each masked field (src MAC, dst MAC, ether type) the field is added
+ * to both the QoS key extract profile and the traffic-class (group) key
+ * extract profile if not already present, then the spec/mask bytes are
+ * written into flow->qos_rule and flow->fs_rule.
+ *
+ * Returns 0 on success (including the no-spec skip case), -1 on failure.
+ * On success, *device_configured accumulates DPAA2_QOS/FS_TABLE_RECONFIGURE
+ * flags when an extract profile was modified.
+ */
+static int
+dpaa2_configure_flow_eth(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_eth *spec, *mask;
+
+ /* TODO: Currently upper bound of range parameter is not implemented */
+ const struct rte_flow_item_eth *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
+
+ group = attr->group;
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_eth *)pattern->spec;
+ last = (const struct rte_flow_item_eth *)pattern->last;
+ /* Fall back to the driver's default full mask when none is given. */
+ mask = (const struct rte_flow_item_eth *)
+ (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
+ if (!spec) {
+ /* Don't care any field of eth header,
+ * only care eth protocol.
+ */
+ DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
+ return 0;
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ /* Reject masks touching fields the hardware extractor cannot match. */
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_ETH)) {
+ DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
+
+ return -1;
+ }
+
+ /* Source MAC: ensure ETH_SA is in both extract profiles, then set
+ * the rule data.
+ */
+ if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_SA);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_ETH, NH_FLD_ETH_SA,
+ RTE_ETHER_ADDR_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_SA);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_ETH, NH_FLD_ETH_SA,
+ RTE_ETHER_ADDR_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Keep IP-address extracts at the tail of the key so later
+ * field insertions don't shift them (driver-wide invariant).
+ */
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before ETH_SA rule set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_SA,
+ &spec->src.addr_bytes,
+ &mask->src.addr_bytes,
+ sizeof(struct rte_ether_addr));
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_SA,
+ &spec->src.addr_bytes,
+ &mask->src.addr_bytes,
+ sizeof(struct rte_ether_addr));
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Destination MAC: same procedure as ETH_SA above. */
+ if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_DA);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_ETH, NH_FLD_ETH_DA,
+ RTE_ETHER_ADDR_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_DA);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_ETH, NH_FLD_ETH_DA,
+ RTE_ETHER_ADDR_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before ETH DA rule set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_DA,
+ &spec->dst.addr_bytes,
+ &mask->dst.addr_bytes,
+ sizeof(struct rte_ether_addr));
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_DA,
+ &spec->dst.addr_bytes,
+ &mask->dst.addr_bytes,
+ sizeof(struct rte_ether_addr));
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Ether type (big-endian 16-bit): same procedure. */
+ if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE,
+ RTE_ETHER_TYPE_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_ETH, NH_FLD_ETH_TYPE,
+ RTE_ETHER_TYPE_LEN);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before ETH TYPE rule set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_TYPE,
+ &spec->type,
+ &mask->type,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_ETH,
+ NH_FLD_ETH_TYPE,
+ &spec->type,
+ &mask->type,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Report table-reconfiguration needs to the caller. */
+ (*device_configured) |= local_cfg;
+
+ return 0;
+}
+
+/*
+ * Build QoS and FS classification rules for a VLAN pattern item.
+ *
+ * Without a spec, the flow only discriminates VLAN traffic by matching
+ * ETH_TYPE == 0x8100. With a spec, only the TCI field is matched (a mask
+ * touching any other field is rejected by dpaa2_flow_extract_support).
+ *
+ * Returns 0 on success, -1 on failure; *device_configured accumulates
+ * DPAA2_QOS/FS_TABLE_RECONFIGURE flags when extract profiles changed.
+ */
+static int
+dpaa2_configure_flow_vlan(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_vlan *spec, *mask;
+
+ const struct rte_flow_item_vlan *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_vlan *)pattern->spec;
+ last = (const struct rte_flow_item_vlan *)pattern->last;
+ /* Fall back to the driver's default full mask when none is given. */
+ mask = (const struct rte_flow_item_vlan *)
+ (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ if (!spec) {
+ /* Don't care any field of vlan header,
+ * only care vlan protocol.
+ */
+ /* Eth type is actually used for vLan classification.
+ */
+ struct proto_discrimination proto;
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.qos_key_extract,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS Ext ETH_TYPE to discriminate vLan failed");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.tc_key_extract[group],
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS Ext ETH_TYPE to discriminate vLan failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Keep IP-address extracts at the tail of the key. */
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before vLan discrimination set failed");
+ return -1;
+ }
+
+ /* Match ETH_TYPE == VLAN TPID to classify VLAN frames. */
+ proto.type = RTE_FLOW_ITEM_TYPE_ETH;
+ proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
+ ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+ proto, group);
+ if (ret) {
+ DPAA2_PMD_ERR("vLan discrimination rule set failed");
+ return -1;
+ }
+
+ (*device_configured) |= local_cfg;
+
+ return 0;
+ }
+
+ /* Reject masks touching fields the hardware extractor cannot match. */
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_VLAN)) {
+ DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
+
+ return -1;
+ }
+
+ /* Only the TCI field is programmable; nothing to do without it. */
+ if (!mask->tci)
+ return 0;
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_VLAN, NH_FLD_VLAN_TCI);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before VLAN TCI rule set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI,
+ &spec->tci,
+ &mask->tci,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_VLAN,
+ NH_FLD_VLAN_TCI,
+ &spec->tci,
+ &mask->tci,
+ sizeof(rte_be16_t));
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
+ return -1;
+ }
+
+ (*device_configured) |= local_cfg;
+
+ return 0;
+}
+
+/*
+ * Install an ETH_TYPE-based discrimination rule so that IPv4 or IPv6
+ * traffic (chosen by pattern->type) is classified by the QoS and FS
+ * tables. Ensures the ETH_TYPE extract exists in both key profiles,
+ * keeps IP-address extracts at the key tail, then programs the
+ * ETH_TYPE == IPv4/IPv6 match.
+ *
+ * NOTE(review): "discrimation" in the name looks like a typo of
+ * "discrimination"; kept as-is since callers use this spelling.
+ *
+ * Returns 0 on success, -1 on failure. *local_cfg accumulates
+ * DPAA2_QOS/FS_TABLE_RECONFIGURE flags; *device_configured is OR-ed
+ * with the accumulated flags on success.
+ */
+static int
+dpaa2_configure_flow_ip_discrimation(
+ struct dpaa2_dev_priv *priv, struct rte_flow *flow,
+ const struct rte_flow_item *pattern,
+ int *local_cfg, int *device_configured,
+ uint32_t group)
+{
+ int index, ret;
+ struct proto_discrimination proto;
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.qos_key_extract,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS Extract ETH_TYPE to discriminate IP failed.");
+ return -1;
+ }
+ (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ETH, NH_FLD_ETH_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.tc_key_extract[group],
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS Extract ETH_TYPE to discriminate IP failed.");
+ return -1;
+ }
+ (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Keep IP-address extracts at the tail of the key. */
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before IP discrimination set failed");
+ return -1;
+ }
+
+ proto.type = RTE_FLOW_ITEM_TYPE_ETH;
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
+ proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ else
+ proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
+ ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
+ if (ret) {
+ DPAA2_PMD_ERR("IP discrimination rule set failed");
+ return -1;
+ }
+
+ (*device_configured) |= (*local_cfg);
+
+ return 0;
+}
+
+
+/*
+ * Build QoS and FS classification rules for an IPv4 or IPv6 pattern item
+ * (pattern->type selects which). First installs the ETH_TYPE-based IP
+ * discrimination rule, then for each masked field (src addr, dst addr,
+ * next-proto) adds the extract to both key profiles if absent and writes
+ * the spec/mask data into flow->qos_rule and flow->fs_rule. Records the
+ * key offsets of src/dst address extracts in flow->ipaddr_rule so they
+ * can later be relocated to the key tail.
+ *
+ * Returns 0 on success, -1 on failure; *device_configured accumulates
+ * DPAA2_QOS/FS_TABLE_RECONFIGURE flags when extract profiles changed.
+ */
+static int
+dpaa2_configure_flow_generic_ip(
+ struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
+ *mask_ipv4 = 0;
+ const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
+ *mask_ipv6 = 0;
+ const void *key, *mask;
+ enum net_prot prot;
+
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
+ int size;
+
+ group = attr->group;
+
+ /* Parse pattern list to get the matching parameters */
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
+ mask_ipv4 = (const struct rte_flow_item_ipv4 *)
+ (pattern->mask ? pattern->mask :
+ &dpaa2_flow_item_ipv4_mask);
+ } else {
+ spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
+ mask_ipv6 = (const struct rte_flow_item_ipv6 *)
+ (pattern->mask ? pattern->mask :
+ &dpaa2_flow_item_ipv6_mask);
+ }
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ ret = dpaa2_configure_flow_ip_discrimation(priv,
+ flow, pattern, &local_cfg,
+ device_configured, group);
+ if (ret) {
+ DPAA2_PMD_ERR("IP discrimination failed!");
+ return -1;
+ }
+
+ /* Discrimination-only flow: nothing more to program. */
+ if (!spec_ipv4 && !spec_ipv6)
+ return 0;
+
+ /* Reject masks touching fields the hardware extractor cannot match. */
+ if (mask_ipv4) {
+ if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
+ RTE_FLOW_ITEM_TYPE_IPV4)) {
+ DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
+
+ return -1;
+ }
+ }
+
+ if (mask_ipv6) {
+ if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
+ RTE_FLOW_ITEM_TYPE_IPV6)) {
+ DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
+
+ return -1;
+ }
+ }
+
+ /* Remember the address family of any address match for the
+ * move-to-tail bookkeeping below.
+ */
+ if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
+ mask_ipv4->hdr.dst_addr)) {
+ flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
+ } else if (mask_ipv6 &&
+ (memcmp((const char *)mask_ipv6->hdr.src_addr,
+ zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
+ memcmp((const char *)mask_ipv6->hdr.dst_addr,
+ zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+ flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
+ }
+
+ /* Source address (IPv4 or IPv6). */
+ if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
+ (mask_ipv6 &&
+ memcmp((const char *)mask_ipv6->hdr.src_addr,
+ zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_IP, NH_FLD_IP_SRC);
+ if (index < 0) {
+ /* Size 0 here — presumably the helper sizes the
+ * IP_SRC extract itself; TODO confirm against
+ * dpaa2_flow_extract_add.
+ */
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_IP,
+ NH_FLD_IP_SRC,
+ 0);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_IP, NH_FLD_IP_SRC);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_IP,
+ NH_FLD_IP_SRC,
+ 0);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ if (spec_ipv4)
+ key = &spec_ipv4->hdr.src_addr;
+ else
+ key = &spec_ipv6->hdr.src_addr[0];
+ if (mask_ipv4) {
+ mask = &mask_ipv4->hdr.src_addr;
+ size = NH_FLD_IPV4_ADDR_SIZE;
+ prot = NET_PROT_IPV4;
+ } else {
+ mask = &mask_ipv6->hdr.src_addr[0];
+ size = NH_FLD_IPV6_ADDR_SIZE;
+ prot = NET_PROT_IPV6;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ prot, NH_FLD_IP_SRC,
+ key, mask, size);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ prot, NH_FLD_IP_SRC,
+ key, mask, size);
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
+ return -1;
+ }
+
+ /* Record key offsets so the ipaddr extracts can later be
+ * relocated to the key tail.
+ */
+ flow->ipaddr_rule.qos_ipsrc_offset =
+ dpaa2_flow_extract_key_offset(
+ &priv->extract.qos_key_extract,
+ prot, NH_FLD_IP_SRC);
+ flow->ipaddr_rule.fs_ipsrc_offset =
+ dpaa2_flow_extract_key_offset(
+ &priv->extract.tc_key_extract[group],
+ prot, NH_FLD_IP_SRC);
+ }
+
+ /* Destination address (IPv4 or IPv6). */
+ if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
+ (mask_ipv6 &&
+ memcmp((const char *)mask_ipv6->hdr.dst_addr,
+ zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_IP, NH_FLD_IP_DST);
+ if (index < 0) {
+ if (mask_ipv4)
+ size = NH_FLD_IPV4_ADDR_SIZE;
+ else
+ size = NH_FLD_IPV6_ADDR_SIZE;
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_IP,
+ NH_FLD_IP_DST,
+ size);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_IP, NH_FLD_IP_DST);
+ if (index < 0) {
+ if (mask_ipv4)
+ size = NH_FLD_IPV4_ADDR_SIZE;
+ else
+ size = NH_FLD_IPV6_ADDR_SIZE;
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_IP,
+ NH_FLD_IP_DST,
+ size);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ if (spec_ipv4)
+ key = &spec_ipv4->hdr.dst_addr;
+ else
+ key = &spec_ipv6->hdr.dst_addr[0];
+ if (mask_ipv4) {
+ mask = &mask_ipv4->hdr.dst_addr;
+ size = NH_FLD_IPV4_ADDR_SIZE;
+ prot = NET_PROT_IPV4;
+ } else {
+ mask = &mask_ipv6->hdr.dst_addr[0];
+ size = NH_FLD_IPV6_ADDR_SIZE;
+ prot = NET_PROT_IPV6;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ prot, NH_FLD_IP_DST,
+ key, mask, size);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ prot, NH_FLD_IP_DST,
+ key, mask, size);
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
+ return -1;
+ }
+ flow->ipaddr_rule.qos_ipdst_offset =
+ dpaa2_flow_extract_key_offset(
+ &priv->extract.qos_key_extract,
+ prot, NH_FLD_IP_DST);
+ flow->ipaddr_rule.fs_ipdst_offset =
+ dpaa2_flow_extract_key_offset(
+ &priv->extract.tc_key_extract[group],
+ prot, NH_FLD_IP_DST);
+ }
+
+ /* Next protocol / next header field. */
+ if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
+ (mask_ipv6 && mask_ipv6->hdr.proto)) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_IP,
+ NH_FLD_IP_PROTO,
+ NH_FLD_IP_PROTO_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_IP,
+ NH_FLD_IP_PROTO,
+ NH_FLD_IP_PROTO_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
+ return -1;
+ }
+
+ if (spec_ipv4)
+ key = &spec_ipv4->hdr.next_proto_id;
+ else
+ key = &spec_ipv6->hdr.proto;
+ if (mask_ipv4)
+ mask = &mask_ipv4->hdr.next_proto_id;
+ else
+ mask = &mask_ipv6->hdr.proto;
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_IP,
+ NH_FLD_IP_PROTO,
+ key, mask, NH_FLD_IP_PROTO_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_IP,
+ NH_FLD_IP_PROTO,
+ key, mask, NH_FLD_IP_PROTO_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Report table-reconfiguration needs to the caller. */
+ (*device_configured) |= local_cfg;
+
+ return 0;
+}
+
+/*
+ * Build QoS and FS classification rules for an ICMP pattern item.
+ *
+ * Without a spec, the flow only discriminates ICMP traffic by matching
+ * IP next-proto == IPPROTO_ICMP. With a spec, the icmp_type and/or
+ * icmp_code fields are matched according to the mask.
+ *
+ * Returns 0 on success, -1 on failure; *device_configured accumulates
+ * DPAA2_QOS/FS_TABLE_RECONFIGURE flags when extract profiles changed.
+ */
+static int
+dpaa2_configure_flow_icmp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_icmp *spec, *mask;
+
+ const struct rte_flow_item_icmp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_icmp *)pattern->spec;
+ last = (const struct rte_flow_item_icmp *)pattern->last;
+ /* Fall back to the driver's default full mask when none is given. */
+ mask = (const struct rte_flow_item_icmp *)
+ (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ if (!spec) {
+ /* Don't care any field of ICMP header,
+ * only care ICMP protocol.
+ * Example: flow create 0 ingress pattern icmp /
+ */
+ /* Next proto of Generical IP is actually used
+ * for ICMP identification.
+ */
+ struct proto_discrimination proto;
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.qos_key_extract,
+ DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS Extract IP protocol to discriminate ICMP failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.tc_key_extract[group],
+ DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS Extract IP protocol to discriminate ICMP failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Keep IP-address extracts at the tail of the key. */
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move IP addr before ICMP discrimination set failed");
+ return -1;
+ }
+
+ proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+ proto.ip_proto = IPPROTO_ICMP;
+ ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+ proto, group);
+ if (ret) {
+ DPAA2_PMD_ERR("ICMP discrimination rule set failed");
+ return -1;
+ }
+
+ (*device_configured) |= local_cfg;
+
+ return 0;
+ }
+
+ /* Reject masks touching fields the hardware extractor cannot match. */
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_ICMP)) {
+ DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
+
+ return -1;
+ }
+
+ /* ICMP type field. */
+ if (mask->hdr.icmp_type) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE,
+ NH_FLD_ICMP_TYPE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE,
+ NH_FLD_ICMP_TYPE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before ICMP TYPE set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE,
+ &spec->hdr.icmp_type,
+ &mask->hdr.icmp_type,
+ NH_FLD_ICMP_TYPE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_TYPE,
+ &spec->hdr.icmp_type,
+ &mask->hdr.icmp_type,
+ NH_FLD_ICMP_TYPE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
+ return -1;
+ }
+ }
+
+ /* ICMP code field. */
+ if (mask->hdr.icmp_code) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_ICMP, NH_FLD_ICMP_CODE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE,
+ NH_FLD_ICMP_CODE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_ICMP, NH_FLD_ICMP_CODE);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE,
+ NH_FLD_ICMP_CODE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr after ICMP CODE set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE,
+ &spec->hdr.icmp_code,
+ &mask->hdr.icmp_code,
+ NH_FLD_ICMP_CODE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_ICMP,
+ NH_FLD_ICMP_CODE,
+ &spec->hdr.icmp_code,
+ &mask->hdr.icmp_code,
+ NH_FLD_ICMP_CODE_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Report table-reconfiguration needs to the caller. */
+ (*device_configured) |= local_cfg;
+
+ return 0;
+}
+
+/*
+ * Build QoS and FS classification rules for a UDP pattern item.
+ *
+ * If there is no spec, or the MC firmware cannot identify L4 ports
+ * (mc_l4_port_identification is false — presumably an MC capability
+ * flag; verify against its definition), an IP next-proto == IPPROTO_UDP
+ * discrimination rule is installed first. With a spec, the src and/or
+ * dst port fields are matched according to the mask.
+ *
+ * Returns 0 on success, -1 on failure; *device_configured accumulates
+ * DPAA2_QOS/FS_TABLE_RECONFIGURE flags when extract profiles changed.
+ */
+static int
+dpaa2_configure_flow_udp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;
+ uint32_t group;
+ const struct rte_flow_item_udp *spec, *mask;
+
+ const struct rte_flow_item_udp *last __rte_unused;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ group = attr->group;
+
+ /* Parse pattern list to get the matching parameters */
+ spec = (const struct rte_flow_item_udp *)pattern->spec;
+ last = (const struct rte_flow_item_udp *)pattern->last;
+ /* Fall back to the driver's default full mask when none is given. */
+ mask = (const struct rte_flow_item_udp *)
+ (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
+
+ /* Get traffic class index and flow id to be configured */
+ flow->tc_id = group;
+ flow->tc_index = attr->priority;
+
+ if (!spec || !mc_l4_port_identification) {
+ struct proto_discrimination proto;
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.qos_key_extract,
+ DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS Extract IP protocol to discriminate UDP failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_IP, NH_FLD_IP_PROTO);
+ if (index < 0) {
+ ret = dpaa2_flow_proto_discrimination_extract(
+ &priv->extract.tc_key_extract[group],
+ DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS Extract IP protocol to discriminate UDP failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ /* Keep IP-address extracts at the tail of the key. */
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move IP addr before UDP discrimination set failed");
+ return -1;
+ }
+
+ proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
+ proto.ip_proto = IPPROTO_UDP;
+ ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
+ proto, group);
+ if (ret) {
+ DPAA2_PMD_ERR("UDP discrimination rule set failed");
+ return -1;
+ }
+
+ (*device_configured) |= local_cfg;
+
+ /* Discrimination-only flow; with a spec, fall through to
+ * also program the port fields.
+ */
+ if (!spec)
+ return 0;
+ }
+
+ /* Reject masks touching fields the hardware extractor cannot match. */
+ if (dpaa2_flow_extract_support((const uint8_t *)mask,
+ RTE_FLOW_ITEM_TYPE_UDP)) {
+ DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
+
+ return -1;
+ }
+
+ /* UDP source port. */
+ if (mask->hdr.src_port) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before UDP_PORT_SRC set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC,
+ &spec->hdr.src_port,
+ &mask->hdr.src_port,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_SRC,
+ &spec->hdr.src_port,
+ &mask->hdr.src_port,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS NH_FLD_UDP_PORT_SRC rule data set failed");
+ return -1;
+ }
+ }
+
+ /* UDP destination port. */
+ if (mask->hdr.dst_port) {
+ index = dpaa2_flow_extract_search(
+ &priv->extract.qos_key_extract.dpkg,
+ NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.qos_key_extract,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
+ }
+
+ index = dpaa2_flow_extract_search(
+ &priv->extract.tc_key_extract[group].dpkg,
+ NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
+ if (index < 0) {
+ ret = dpaa2_flow_extract_add(
+ &priv->extract.tc_key_extract[group],
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
+
+ return -1;
+ }
+ local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
+ }
+
+ ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Move ipaddr before UDP_PORT_DST set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.qos_key_extract,
+ &flow->qos_rule,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST,
+ &spec->hdr.dst_port,
+ &mask->hdr.dst_port,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "QoS NH_FLD_UDP_PORT_DST rule data set failed");
+ return -1;
+ }
+
+ ret = dpaa2_flow_rule_data_set(
+ &priv->extract.tc_key_extract[group],
+ &flow->fs_rule,
+ NET_PROT_UDP,
+ NH_FLD_UDP_PORT_DST,
+ &spec->hdr.dst_port,
+ &mask->hdr.dst_port,
+ NH_FLD_UDP_PORT_SIZE);
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "FS NH_FLD_UDP_PORT_DST rule data set failed");
+ return -1;
+ }
+ }
+
+ /* Report table-reconfiguration needs to the caller. */
+ (*device_configured) |= local_cfg;
+
+ return 0;
+}
+
+static int
+dpaa2_configure_flow_tcp(struct rte_flow *flow,
+ struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error __rte_unused,
+ int *device_configured)
+{
+ int index, ret;
+ int local_cfg = 0;