return 0;
}
+static void
+ice_rss_ctx_init(struct ice_pf *pf)
+{
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+}
+
static int
ice_dev_init(struct rte_eth_dev *dev)
{
/* get base queue pairs index in the device */
ice_base_queue_get(pf);
+ /* Initialize RSS context for gtpu_eh */
+ ice_rss_ctx_init(pf);
+
if (!ad->is_safe_mode) {
ret = ice_flow_init(ad);
if (ret) {
struct ice_hw *hw;
struct ice_vsi_ctx vsi_ctx;
enum ice_status ret;
+ int error = 0;
if (!vsi)
- return 0;
+ return error;
hw = ICE_VSI_TO_HW(vsi);
ret = ice_free_vsi(hw, vsi->idx, &vsi_ctx, false, NULL);
if (ret != ICE_SUCCESS) {
PMD_INIT_LOG(ERR, "Failed to free vsi by aq, %u", vsi->vsi_id);
- rte_free(vsi);
- return -1;
+ error = -1;
}
+ rte_free(vsi->rss_lut);
+ rte_free(vsi->rss_key);
rte_free(vsi);
- return 0;
+ return error;
}
void
return 0;
}
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ struct ice_vsi *vsi = pf->main_vsi;
+
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv4.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
+ pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
+ pf->gtpu_hash_ctx.ipv6.symm = symm;
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ }
+ }
+
+ if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
+ ICE_FLOW_SEG_HDR_GTPU_UP)) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv4.hash_fld,
+ pf->gtpu_hash_ctx.ipv4.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv4.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
+ }
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
+ ice_add_rss_cfg(hw, vsi->idx,
+ pf->gtpu_hash_ctx.ipv6.hash_fld,
+ pf->gtpu_hash_ctx.ipv6.pkt_hdr,
+ pf->gtpu_hash_ctx.ipv6.symm);
+ ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+ }
+
+ return 0;
+}
+
/* Tear down any existing RSS rules that would conflict with a new GTPU
 * DWN/UP rule before it is programmed.
 *
 * For each IPv4/IPv6 (optionally UDP/TCP) combination matched by @hdr:
 *  - a conflicting L4-specific GTPU_EH context is removed from hardware
 *    and its cache RESET (it is superseded for good);
 *  - a conflicting outer IPv4/IPv6 GTPU_EH context is removed from
 *    hardware but marked ROTATE_START, so ice_add_rss_cfg_post() can
 *    re-program it once the new rule is in place;
 *  - a plain IPv4/IPv6 rule removes and RESETs the outer context and
 *    both L4-specific contexts, since it replaces all of them.
 *
 * @pf:  board private structure
 * @hdr: protocol header bitmap (ICE_FLOW_SEG_HDR_*) of the rule about
 *       to be added
 *
 * Always returns 0.  NOTE(review): ice_rem_rss_cfg() results are
 * deliberately ignored here — a rule may legitimately not exist yet;
 * presumably ICE_ERR_DOES_NOT_EXIST is the common benign case (confirm
 * against ice_flow.c).
 */
static int
ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;

	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
	    ICE_FLOW_SEG_HDR_GTPU_UP)) {
		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			/* L4-specific context is superseded: drop for good. */
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			/* Outer context only rotates: re-added in _post(). */
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
			}
		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
			/* Plain IPv4 replaces outer and both L4 contexts. */
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4.hash_fld,
					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
			}
		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
			/* Plain IPv6 replaces outer and both L4 contexts. */
			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6.hash_fld,
					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
			}

			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
				ice_rem_rss_cfg(hw, vsi->idx,
					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
			}
		}
	}

	return 0;
}
+
+static int
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+ if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
+ if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_UDP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
+ } else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
+ (hdr & ICE_FLOW_SEG_HDR_TCP)) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
+ } else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
+ ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
+ }
+ }
+
+ return 0;
+}
+
+int
+ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi_id, fld, hdr);
+ if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
+ PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
+
+ ret = ice_rem_rss_cfg_post(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");
+
+ return 0;
+}
+
+int
+ice_add_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
+ uint64_t fld, uint32_t hdr, bool symm)
+{
+ struct ice_hw *hw = ICE_PF_TO_HW(pf);
+ int ret;
+
+ ret = ice_add_rss_cfg_pre(pf, hdr);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg pre failed\n");
+
+ ret = ice_add_rss_cfg(hw, vsi_id, fld, hdr, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg failed\n");
+
+ ret = ice_add_rss_cfg_post(pf, hdr, fld, symm);
+ if (ret)
+ PMD_DRV_LOG(ERR, "add rss cfg post failed\n");
+
+ return 0;
+}
+
static void
ice_rss_hash_set(struct ice_pf *pf, uint64_t rss_hf)
{
- struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
int ret;
/* Configure RSS for IPv4 with src/dst addr as input set */
if (rss_hf & ETH_RSS_IPV4) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
if (ret)
/* Configure RSS for IPv6 with src/dst addr as input set */
if (rss_hf & ETH_RSS_IPV6) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
if (ret)
/* Configure RSS for udp4 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV4,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
/* Configure RSS for udp6 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_UDP_IPV6,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
ICE_FLOW_SEG_HDR_UDP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
/* Configure RSS for tcp4 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV4,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
/* Configure RSS for tcp6 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_HASH_TCP_IPV6,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
ICE_FLOW_SEG_HDR_TCP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
/* Configure RSS for sctp4 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV4 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
/* Configure RSS for sctp6 with src/dst addr and port as input set */
if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
- ret = ice_add_rss_cfg(hw, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
ICE_FLOW_SEG_HDR_SCTP |
ICE_FLOW_SEG_HDR_IPV6 |
ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
PMD_DRV_LOG(ERR, "%s SCTP_IPV6 rss flow fail %d",
__func__, ret);
}
+
+ if (rss_hf & ETH_RSS_IPV4) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4 rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_IPV6) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6 rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_FLOW_HASH_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6 rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_UDP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_UDP_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_UDP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV4,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV4_TCP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_TCP_IPV6,
+ ICE_FLOW_SEG_HDR_PPPOE |
+ ICE_FLOW_SEG_HDR_TCP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s PPPoE_IPV6_TCP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV4_SCTP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV4,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV4_SCTP rss flow fail %d",
+ __func__, ret);
+ }
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_SCTP) {
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_IP |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_IPV6_SCTP rss flow fail %d",
+ __func__, ret);
+
+ ret = ice_add_rss_cfg_wrap(pf, vsi->idx, ICE_HASH_SCTP_IPV6,
+ ICE_FLOW_SEG_HDR_GTPU_EH |
+ ICE_FLOW_SEG_HDR_SCTP |
+ ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER, 0);
+ if (ret)
+ PMD_DRV_LOG(ERR, "%s GTPU_EH_IPV6_SCTP rss flow fail %d",
+ __func__, ret);
+ }
}
static int ice_init_rss(struct ice_pf *pf)
return 0;
}
- if (!vsi->rss_key)
+ if (!vsi->rss_key) {
vsi->rss_key = rte_zmalloc(NULL,
vsi->rss_key_size, 0);
- if (!vsi->rss_lut)
+ if (vsi->rss_key == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
+ return -ENOMEM;
+ }
+ }
+ if (!vsi->rss_lut) {
vsi->rss_lut = rte_zmalloc(NULL,
vsi->rss_lut_size, 0);
-
+ if (vsi->rss_lut == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for rss_key");
+ rte_free(vsi->rss_key);
+ vsi->rss_key = NULL;
+ return -ENOMEM;
+ }
+ }
/* configure RSS key */
if (!rss_conf->rss_key) {
/* Calculate the default hash key */
rte_memcpy(key.standard_rss_key, vsi->rss_key, vsi->rss_key_size);
ret = ice_aq_set_rss_key(hw, vsi->idx, &key);
if (ret)
- return -EINVAL;
+ goto out;
/* init RSS LUT table */
for (i = 0; i < vsi->rss_lut_size; i++)
ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
vsi->rss_lut, vsi->rss_lut_size);
if (ret)
- return -EINVAL;
+ goto out;
/* Enable registers for symmetric_toeplitz function. */
reg = ICE_READ_REG(hw, VSIQF_HASH_CTL(vsi->vsi_id));
ice_rss_hash_set(pf, rss_conf->rss_hf);
return 0;
+out:
+ rte_free(vsi->rss_key);
+ vsi->rss_key = NULL;
+ rte_free(vsi->rss_lut);
+ vsi->rss_lut = NULL;
+ return -EINVAL;
}
static int
ice_stat_update_40(hw, GLV_BPRCH(idx), GLV_BPRCL(idx),
vsi->offset_loaded, &oes->rx_broadcast,
&nes->rx_broadcast);
+ /* enlarge the limitation when rx_bytes overflowed */
+ if (vsi->offset_loaded) {
+ if (ICE_RXTX_BYTES_LOW(vsi->old_rx_bytes) > nes->rx_bytes)
+ nes->rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+ nes->rx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_rx_bytes);
+ }
+ vsi->old_rx_bytes = nes->rx_bytes;
/* exclude CRC bytes */
nes->rx_bytes -= (nes->rx_unicast + nes->rx_multicast +
nes->rx_broadcast) * RTE_ETHER_CRC_LEN;
/* GLV_TDPC not supported */
ice_stat_update_32(hw, GLV_TEPC(idx), vsi->offset_loaded,
&oes->tx_errors, &nes->tx_errors);
+ /* enlarge the limitation when tx_bytes overflowed */
+ if (vsi->offset_loaded) {
+ if (ICE_RXTX_BYTES_LOW(vsi->old_tx_bytes) > nes->tx_bytes)
+ nes->tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+ nes->tx_bytes += ICE_RXTX_BYTES_HIGH(vsi->old_tx_bytes);
+ }
+ vsi->old_tx_bytes = nes->tx_bytes;
vsi->offset_loaded = true;
PMD_DRV_LOG(DEBUG, "************** VSI[%u] stats start **************",
ice_stat_update_32(hw, PRTRPB_RDPC,
pf->offset_loaded, &os->eth.rx_discards,
&ns->eth.rx_discards);
+ /* enlarge the limitation when rx_bytes overflowed */
+ if (pf->offset_loaded) {
+ if (ICE_RXTX_BYTES_LOW(pf->old_rx_bytes) > ns->eth.rx_bytes)
+ ns->eth.rx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+ ns->eth.rx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_rx_bytes);
+ }
+ pf->old_rx_bytes = ns->eth.rx_bytes;
/* Workaround: CRC size should not be included in byte statistics,
* so subtract RTE_ETHER_CRC_LEN from the byte counter for each rx
GLPRT_BPTCL(hw->port_info->lport),
pf->offset_loaded, &os->eth.tx_broadcast,
&ns->eth.tx_broadcast);
+ /* enlarge the limitation when tx_bytes overflowed */
+ if (pf->offset_loaded) {
+ if (ICE_RXTX_BYTES_LOW(pf->old_tx_bytes) > ns->eth.tx_bytes)
+ ns->eth.tx_bytes += (uint64_t)1 << ICE_40_BIT_WIDTH;
+ ns->eth.tx_bytes += ICE_RXTX_BYTES_HIGH(pf->old_tx_bytes);
+ }
+ pf->old_tx_bytes = ns->eth.tx_bytes;
ns->eth.tx_bytes -= (ns->eth.tx_unicast + ns->eth.tx_multicast +
ns->eth.tx_broadcast) * RTE_ETHER_CRC_LEN;