X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fsoftnic%2Frte_eth_softnic_flow.c;h=f05ff092fafe4da69731848a38446cecc0c68b03;hb=fe38ad9ba73e1c856f53391a3a844c8b05005db1;hp=9bb0d563f698de205239bc5e6e66f937419e8310;hpb=974a48d42b430db0873616aca8ca495acca3b5ee;p=dpdk.git

diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
index 9bb0d563f6..f05ff092fa 100644
--- a/drivers/net/softnic/rte_eth_softnic_flow.c
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -56,7 +56,7 @@ flow_attr_map_set(struct pmd_internals *softnic,
 
 	map = (ingress) ? &softnic->flow.ingress_map[group_id] :
 		&softnic->flow.egress_map[group_id];
-	strcpy(map->pipeline_name, pipeline_name);
+	strlcpy(map->pipeline_name, pipeline_name, sizeof(map->pipeline_name));
 	map->table_id = table_id;
 	map->valid = 1;
 
@@ -1169,6 +1169,7 @@ flow_rule_action_get(struct pmd_internals *softnic,
 	int n_jump_queue_rss_drop = 0;
 	int n_count = 0;
 	int n_mark = 0;
+	int n_vxlan_decap = 0;
 
 	profile = softnic_table_action_profile_find(softnic,
 		table->params.action_profile_name);
@@ -1282,7 +1283,8 @@ flow_rule_action_get(struct pmd_internals *softnic,
 					action,
 					"QUEUE: Invalid RX queue ID");
 
-		sprintf(name, "RXQ%u", (uint32_t)conf->index);
+		snprintf(name, sizeof(name), "RXQ%u",
+			(uint32_t)conf->index);
 
 		status = softnic_pipeline_port_out_find(softnic,
 			pipeline->name,
@@ -1372,7 +1374,7 @@ flow_rule_action_get(struct pmd_internals *softnic,
 						action,
 						"RSS: Invalid RX queue ID");
 
-				sprintf(name, "RXQ%u",
+				snprintf(name, sizeof(name), "RXQ%u",
 					(uint32_t)conf->queue[i]);
 
 				status = softnic_pipeline_port_out_find(softnic,
@@ -1510,6 +1512,40 @@ flow_rule_action_get(struct pmd_internals *softnic,
 			break;
 		} /* RTE_FLOW_ACTION_TYPE_MARK */
 
+		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+		{
+			const struct rte_flow_action_mark *conf = action->conf;
+
+			if (conf)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN DECAP: Non-null configuration");
+
+			if (n_vxlan_decap)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"Only one VXLAN DECAP action per flow");
+
+			if ((params->action_mask &
+				(1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"VXLAN DECAP action not supported by this table");
+
+			n_vxlan_decap = 1;
+
+			/* RTE_TABLE_ACTION_DECAP */
+			rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
+			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+			break;
+		} /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
+
 		case RTE_FLOW_ACTION_TYPE_METER:
 		{
 			const struct rte_flow_action_meter *conf = action->conf;
@@ -1588,17 +1624,209 @@ flow_rule_action_get(struct pmd_internals *softnic,
 
 			/* RTE_TABLE_ACTION_METER */
 			rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
-			rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
-				(enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_GREEN] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_GREEN]);
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_YELLOW] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_YELLOW]);
+			rule_action->mtr.mtr[0].policer[RTE_COLOR_RED] =
+				softnic_table_action_policer(m->params.action[RTE_COLOR_RED]);
 			rule_action->mtr.tc_mask = 1;
 			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
 			break;
 		} /* RTE_FLOW_ACTION_TYPE_METER */
 
+		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+		{
+			const struct rte_flow_action_vxlan_encap *conf =
+				action->conf;
+			const struct rte_flow_item *item;
+			union flow_item spec, mask;
+			int disabled = 0, status;
+			size_t size;
+
+			if (conf == NULL)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN ENCAP: Null configuration");
+
+			item = conf->definition;
+			if (item == NULL)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					action,
+					"VXLAN ENCAP: Null configuration definition");
+
+			if (!(params->action_mask &
+				(1LLU << RTE_TABLE_ACTION_ENCAP)))
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"VXLAN ENCAP: Encap action not enabled for this table");
+
+			/* Check for Ether. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: first encap item should be ether");
+			}
+			rte_ether_addr_copy(&spec.eth.dst,
+				&rule_action->encap.vxlan.ether.da);
+			rte_ether_addr_copy(&spec.eth.src,
+				&rule_action->encap.vxlan.ether.sa);
+
+			item++;
+
+			/* Check for VLAN. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+				if (!params->encap.vxlan.vlan)
+					return rte_flow_error_set(error,
+						ENOTSUP,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"VXLAN ENCAP: vlan encap not supported by table");
+
+				uint16_t tci = rte_ntohs(spec.vlan.tci);
+				rule_action->encap.vxlan.vlan.pcp =
+					tci >> 13;
+				rule_action->encap.vxlan.vlan.dei =
+					(tci >> 12) & 0x1;
+				rule_action->encap.vxlan.vlan.vid =
+					tci & 0xfff;
+
+				item++;
+
+				flow_item_skip_void(&item);
+				status = flow_item_proto_preprocess(item, &spec,
+					&mask, &size, &disabled, error);
+				if (status)
+					return status;
+			} else {
+				if (params->encap.vxlan.vlan)
+					return rte_flow_error_set(error,
+						ENOTSUP,
+						RTE_FLOW_ERROR_TYPE_ITEM,
+						item,
+						"VXLAN ENCAP: expecting vlan encap item");
+			}
+
+			/* Check for IPV4/IPV6. */
+			switch (item->type) {
+			case RTE_FLOW_ITEM_TYPE_IPV4:
+			{
+				rule_action->encap.vxlan.ipv4.sa =
+					rte_ntohl(spec.ipv4.hdr.src_addr);
+				rule_action->encap.vxlan.ipv4.da =
+					rte_ntohl(spec.ipv4.hdr.dst_addr);
+				rule_action->encap.vxlan.ipv4.dscp =
+					spec.ipv4.hdr.type_of_service >> 2;
+				rule_action->encap.vxlan.ipv4.ttl =
+					spec.ipv4.hdr.time_to_live;
+				break;
+			}
+			case RTE_FLOW_ITEM_TYPE_IPV6:
+			{
+				uint32_t vtc_flow;
+
+				memcpy(&rule_action->encap.vxlan.ipv6.sa,
+					&spec.ipv6.hdr.src_addr,
+					sizeof(spec.ipv6.hdr.src_addr));
+				memcpy(&rule_action->encap.vxlan.ipv6.da,
+					&spec.ipv6.hdr.dst_addr,
+					sizeof(spec.ipv6.hdr.dst_addr));
+				vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
+				rule_action->encap.vxlan.ipv6.flow_label =
+					vtc_flow & 0xfffff;
+				rule_action->encap.vxlan.ipv6.dscp =
+					(vtc_flow >> 22) & 0x3f;
+				rule_action->encap.vxlan.ipv6.hop_limit =
+					spec.ipv6.hdr.hop_limits;
+				break;
+			}
+			default:
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
+			}
+
+			item++;
+
+			/* Check for UDP. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
+			}
+			rule_action->encap.vxlan.udp.sp =
+				rte_ntohs(spec.udp.hdr.src_port);
+			rule_action->encap.vxlan.udp.dp =
+				rte_ntohs(spec.udp.hdr.dst_port);
+
+			item++;
+
+			/* Check for VXLAN. */
+			flow_item_skip_void(&item);
+			status = flow_item_proto_preprocess(item, &spec, &mask,
+				&size, &disabled, error);
+			if (status)
+				return status;
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: encap item after udp should be vxlan");
+			}
+			rule_action->encap.vxlan.vxlan.vni =
+				(spec.vxlan.vni[0] << 16U |
+				spec.vxlan.vni[1] << 8U
+				| spec.vxlan.vni[2]);
+
+			item++;
+
+			/* Check for END. */
+			flow_item_skip_void(&item);
+
+			if (item->type != RTE_FLOW_ITEM_TYPE_END)
+				return rte_flow_error_set(error,
+					EINVAL,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"VXLAN ENCAP: expecting END item");
+
+			rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+			rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+			break;
+		} /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */
+
 		default:
 			return -ENOTSUP;
 		}
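
Note: the VXLAN_ENCAP handler added above expects conf->definition to describe the encapsulation headers as an ETH / optional VLAN / IPV4 or IPV6 / UDP / VXLAN item chain terminated by END. The sketch below shows how an application might build such an action with the public rte_flow API; it is illustrative only and not part of the patch. The function name, addresses, ports, VNI, flow attributes and match pattern are made-up placeholders, and in a real setup the attributes and pattern would have to match the pipeline table that the softnic PMD maps to the chosen group.

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical usage sketch (not from the patch): build a VXLAN_ENCAP action
 * whose item chain matches what the parser above accepts, then create a flow.
 */
static struct rte_flow *
example_vxlan_encap_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_item_eth eth = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
		.src.addr_bytes = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
	};
	struct rte_flow_item_ipv4 ipv4 = {
		.hdr.src_addr = rte_cpu_to_be_32(0x0a000001), /* 10.0.0.1 */
		.hdr.dst_addr = rte_cpu_to_be_32(0x0a000002), /* 10.0.0.2 */
		.hdr.time_to_live = 64,
	};
	struct rte_flow_item_udp udp = {
		.hdr.src_port = rte_cpu_to_be_16(4789),
		.hdr.dst_port = rte_cpu_to_be_16(4789),
	};
	struct rte_flow_item_vxlan vxlan = {
		.vni = { 0x00, 0x00, 0x64 }, /* VNI 100 */
	};
	/* Encap header definition: ETH / IPV4 / UDP / VXLAN, terminated by END. */
	struct rte_flow_item definition[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vxlan_encap encap = { .definition = definition };
	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
	/* Placeholder match pattern; a real rule must match the table type. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &encap },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}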