static int ice_dev_configure(struct rte_eth_dev *dev);
static int ice_dev_start(struct rte_eth_dev *dev);
-static void ice_dev_stop(struct rte_eth_dev *dev);
-static void ice_dev_close(struct rte_eth_dev *dev);
+static int ice_dev_stop(struct rte_eth_dev *dev);
+static int ice_dev_close(struct rte_eth_dev *dev);
static int ice_dev_reset(struct rte_eth_dev *dev);
static int ice_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
struct rte_eth_udp_tunnel *udp_tunnel);
static const struct rte_pci_id pci_id_ice_map[] = {
+	/* E823L family */
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_BACKPLANE) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_QSFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SFP) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_10G_BASE_T) },
	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822C_SGMII) },
-	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_BACKPLANE) },
-	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_SFP) },
-	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_10G_BASE_T) },
-	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_1GBE) },
-	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E823L_QSFP) },
+	/* E822L family */
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_BACKPLANE) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SFP) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_10G_BASE_T) },
+	{ RTE_PCI_DEVICE(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E822L_SGMII) },
	{ .vendor_id = 0, /* sentinel */ },
};
return 0;
}
-/* PCIe configuration space setting */
-#define PCI_CFG_SPACE_SIZE 256
-#define PCI_CFG_SPACE_EXP_SIZE 4096
-#define PCI_EXT_CAP_ID(header) (int)((header) & 0x0000ffff)
-#define PCI_EXT_CAP_NEXT(header) (((header) >> 20) & 0xffc)
-#define PCI_EXT_CAP_ID_DSN 0x03
-
-static int
-ice_pci_find_next_ext_capability(struct rte_pci_device *dev, int cap)
-{
- uint32_t header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
- PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
- return -1;
- }
-
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
-
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (rte_pci_read_config(dev, &header, 4, pos) < 0) {
- PMD_INIT_LOG(ERR, "ice error reading extended capabilities\n");
- return -1;
- }
- }
-
- return 0;
-}
-
/*
* Extract device serial number from PCIe Configuration Space and
* determine the pkg file path according to the DSN.
static int
ice_pkg_file_search_path(struct rte_pci_device *pci_dev, char *pkg_file)
{
- int pos;
+ off_t pos;
char opt_ddp_filename[ICE_MAX_PKG_FILENAME_SIZE];
uint32_t dsn_low, dsn_high;
memset(opt_ddp_filename, 0, ICE_MAX_PKG_FILENAME_SIZE);
- pos = ice_pci_find_next_ext_capability(pci_dev, PCI_EXT_CAP_ID_DSN);
+ pos = rte_pci_find_ext_capability(pci_dev, RTE_PCI_EXT_CAP_ID_DSN);
if (pos) {
rte_pci_read_config(pci_dev, &dsn_low, 4, pos + 4);
+/* Reset all RSS hash contexts tracked in the PF.  The per-slot
+ * ICE_HASH_CFG_RESET calls are replaced by zeroing the whole hash_ctx
+ * container in one shot, which also clears any slots added later.
+ */
static void
ice_rss_ctx_init(struct ice_pf *pf)
{
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
-
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
-	ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
+	memset(&pf->hash_ctx, 0, sizeof(pf->hash_ctx));
}
static uint64_t
return 0;
}
+ dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
ice_set_default_ptype_table(dev);
pci_dev = RTE_DEV_TO_PCI(dev->device);
intr_handle = &pci_dev->intr_handle;
goto err_init_mac;
}
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
ret = ice_res_pool_init(&pf->msix_pool, 1,
hw->func_caps.common_cap.num_msix_vectors - 1);
if (ret) {
ICE_WRITE_REG(hw, GLINT_DYN_CTL(0), GLINT_DYN_CTL_WB_ON_ITR_M);
}
-static void
+static int
ice_dev_stop(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *data = dev->data;
/* avoid stopping again */
if (pf->adapter_stopped)
- return;
+ return 0;
/* stop and clear all Rx queues */
for (i = 0; i < data->nb_rx_queues; i++)
}
pf->adapter_stopped = true;
+ dev->data->dev_started = 0;
+
+ return 0;
}
-static void
+static int
ice_dev_close(struct rte_eth_dev *dev)
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ice_adapter *ad =
ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
/* Since stop will make link down, then the link event will be
* triggered, disable the irq firstly to avoid the port_infoe etc
*/
ice_pf_disable_irq0(hw);
- ice_dev_stop(dev);
+ ret = ice_dev_stop(dev);
if (!ad->is_safe_mode)
ice_flow_uninit(ad);
rte_free(pf->proto_xtr);
pf->proto_xtr = NULL;
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
- rte_free(dev->data->mac_addrs);
- dev->data->mac_addrs = NULL;
-
/* disable uio intr before callback unregister */
rte_intr_disable(intr_handle);
/* unregister callback func from eal lib */
rte_intr_callback_unregister(intr_handle,
ice_interrupt_handler, dev);
+
+ return ret;
}
static int
return 0;
}
+/* A hash config slot is considered programmed when it carries a
+ * recognized hash algorithm and both the hash-field and header bitmaps
+ * are non-empty.  Used to decide whether moveout/moveback/remove have
+ * anything to do for a slot.
+ */
+static bool
+is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+	return ((cfg->hash_func >= ICE_RSS_HASH_TOEPLITZ &&
+		 cfg->hash_func <= ICE_RSS_HASH_JHASH) &&
+		(cfg->hash_flds != 0 && cfg->addl_hdrs != 0)) ?
+		true : false;
+}
+
+/* Clear a hash config slot so is_hash_cfg_valid() reports it unused. */
+static void
+hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+	cfg->hash_flds = 0;
+	cfg->addl_hdrs = 0;
+	cfg->hash_func = 0;
+}
+
+/* Remove a programmed RSS rule from hardware while keeping its software
+ * context intact, so ice_hash_moveback() can re-install it later.
+ * Returns -ENOENT when the slot holds no valid config (callers treat
+ * that as "nothing to do"), -EBUSY on a hardware removal failure, and
+ * 0 on success.  A missing rule in hardware (ICE_ERR_DOES_NOT_EXIST)
+ * is tolerated.
+ */
static int
-ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+ice_hash_moveout(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
-	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
-		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv4_udp.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv4_udp.symm = symm;
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv6_udp.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv6_udp.symm = symm;
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv4_tcp.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv4_tcp.symm = symm;
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv6_tcp.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv6_tcp.symm = symm;
-		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
-			pf->gtpu_hash_ctx.ipv4.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv4.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv4.symm = symm;
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
-		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
-			pf->gtpu_hash_ctx.ipv6.pkt_hdr = hdr;
-			pf->gtpu_hash_ctx.ipv6.hash_fld = fld;
-			pf->gtpu_hash_ctx.ipv6.symm = symm;
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
-		}
-	}
+	/* Empty slot: nothing was programmed, report -ENOENT. */
+	if (!is_hash_cfg_valid(cfg))
+		return -ENOENT;
-	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
-	    ICE_FLOW_SEG_HDR_GTPU_UP)) {
-		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
-				ice_add_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4.hash_fld,
-					pf->gtpu_hash_ctx.ipv4.pkt_hdr,
-					pf->gtpu_hash_ctx.ipv4.symm);
-				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
-				ice_add_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6.hash_fld,
-					pf->gtpu_hash_ctx.ipv6.pkt_hdr,
-					pf->gtpu_hash_ctx.ipv6.symm);
-				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv4)) {
-				ice_add_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4.hash_fld,
-					pf->gtpu_hash_ctx.ipv4.pkt_hdr,
-					pf->gtpu_hash_ctx.ipv4.symm);
-				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv4);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			if (ICE_HASH_CFG_IS_ROTATING(&pf->gtpu_hash_ctx.ipv6)) {
-				ice_add_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6.hash_fld,
-					pf->gtpu_hash_ctx.ipv6.pkt_hdr,
-					pf->gtpu_hash_ctx.ipv6.symm);
-				ICE_HASH_CFG_ROTATE_STOP(&pf->gtpu_hash_ctx.ipv6);
-			}
-		}
+	/* Remove from hardware only; the software slot is preserved so
+	 * the rule can be restored by ice_hash_moveback().
+	 */
+	status = ice_rem_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+				 cfg->addl_hdrs);
+	if (status && status != ICE_ERR_DOES_NOT_EXIST) {
+		PMD_DRV_LOG(ERR,
+			    "ice_rem_rss_cfg failed for VSI:%d, error:%d\n",
+			    vsi->idx, status);
+		return -EBUSY;
	}
	return 0;
}
+/* Re-install in hardware an RSS rule whose software context was kept by
+ * ice_hash_moveout().  Returns -ENOENT when the slot holds no valid
+ * config, -EBUSY when ice_add_rss_cfg() fails, 0 on success.
+ */
static int
-ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+ice_hash_moveback(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
{
+	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi = pf->main_vsi;
+	bool symm;
-	if (hdr & (ICE_FLOW_SEG_HDR_GTPU_DWN |
-	    ICE_FLOW_SEG_HDR_GTPU_UP)) {
-		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
-					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
-			}
+	/* Empty slot: nothing to restore. */
+	if (!is_hash_cfg_valid(cfg))
+		return -ENOENT;
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4.hash_fld,
-					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
-				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
-					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-			}
+	/* Recover the symmetric flag from the stored hash function. */
+	symm = (cfg->hash_func == ICE_RSS_HASH_TOEPLITZ_SYMMETRIC) ?
+		true : false;
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6.hash_fld,
-					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
-				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
-					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
-			}
+	status = ice_add_rss_cfg(hw, vsi->idx, cfg->hash_flds,
+				 cfg->addl_hdrs, symm);
+	if (status) {
+		PMD_DRV_LOG(ERR,
+			    "ice_add_rss_cfg failed for VSI:%d, error:%d\n",
+			    vsi->idx, status);
+		return -EBUSY;
+	}
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4.hash_fld,
-					pf->gtpu_hash_ctx.ipv4.pkt_hdr);
-				ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv4);
-			}
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
-					pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
-			}
+	return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6.hash_fld,
- pf->gtpu_hash_ctx.ipv6.pkt_hdr);
- ICE_HASH_CFG_ROTATE_START(&pf->gtpu_hash_ctx.ipv6);
- }
- } else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv4.hash_fld,
- pf->gtpu_hash_ctx.ipv4.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
- }
+/* Permanently remove an RSS rule: take it out of hardware via
+ * ice_hash_moveout() and then clear its software slot so it will not
+ * be restored.  -ENOENT (empty slot) is treated as success.
+ */
+static int
+ice_hash_remove(struct ice_pf *pf, struct ice_rss_hash_cfg *cfg)
+{
+	int ret;
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_udp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4_udp.hash_fld,
-					pf->gtpu_hash_ctx.ipv4_udp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
-			}
+	ret = ice_hash_moveout(pf, cfg);
+	if (ret && (ret != -ENOENT))
+		return ret;
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv4_tcp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv4_tcp.hash_fld,
-					pf->gtpu_hash_ctx.ipv4_tcp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
-			}
-		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6.hash_fld,
-					pf->gtpu_hash_ctx.ipv6.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
-			}
+	/* Forget the slot so moveback will not re-program it. */
+	hash_cfg_reset(cfg);
-			if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_udp)) {
-				ice_rem_rss_cfg(hw, vsi->idx,
-					pf->gtpu_hash_ctx.ipv6_udp.hash_fld,
-					pf->gtpu_hash_ctx.ipv6_udp.pkt_hdr);
-				ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-			}
+	return 0;
+}
- if (ICE_HASH_CFG_VALID(&pf->gtpu_hash_ctx.ipv6_tcp)) {
- ice_rem_rss_cfg(hw, vsi->idx,
- pf->gtpu_hash_ctx.ipv6_tcp.hash_fld,
- pf->gtpu_hash_ctx.ipv6_tcp.pkt_hdr);
- ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
- }
- }
+/* Prepare the GTPU hash context table before a rule with context index
+ * 'ctx_idx' is programmed.  Conflicting contexts are either removed for
+ * good (ice_hash_remove) or parked in software (ice_hash_moveout) so
+ * that ice_add_rss_cfg_post_gtpu() can restore them afterwards.
+ * -ENOENT from an empty slot is ignored; any other error aborts.
+ * An out-of-range ctx_idx (ICE_HASH_GTPU_CTX_MAX) falls through the
+ * default case and is a no-op.
+ */
+static int
+ice_add_rss_cfg_pre_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+			 u8 ctx_idx)
+{
+	int ret;
+
+	switch (ctx_idx) {
+	case ICE_HASH_GTPU_CTX_EH_IP:
+		/* An EH/IP rule supersedes every other GTPU context. */
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+		/* Drop the UP/DW UDP contexts; park the rest for restore. */
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+		/* Mirror of the EH_IP_UDP case for TCP. */
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_UP_IP:
+		/* Drop finer-grained UP contexts; park the EH contexts. */
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+		/* Park all EH contexts; no removals needed. */
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_DW_IP:
+		/* Drop finer-grained DW contexts; park the EH contexts. */
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_remove(pf,
+				      &ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+		/* Park all EH contexts; no removals needed. */
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveout(pf,
+				       &ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	default:
+		break;
	}
	return 0;
}
+/* Map a flow-segment header bitmap to an index in the GTPU hash context
+ * table: eh_idx selects the tunnel flavor (EH=0, UP=1, DWN=2) and
+ * ip_idx the L4 protocol (none=0, UDP=1, TCP=2), combined as
+ * eh_idx * 3 + ip_idx.  Returns ICE_HASH_GTPU_CTX_MAX when 'hdr' is not
+ * a GTPU-over-IPv4/IPv6 pattern.
+ */
+static u8 calc_gtpu_ctx_idx(uint32_t hdr)
+{
+	u8 eh_idx, ip_idx;
+
+	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH)
+		eh_idx = 0;
+	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_UP)
+		eh_idx = 1;
+	else if (hdr & ICE_FLOW_SEG_HDR_GTPU_DWN)
+		eh_idx = 2;
+	else
+		return ICE_HASH_GTPU_CTX_MAX;
+
+	ip_idx = 0;
+	if (hdr & ICE_FLOW_SEG_HDR_UDP)
+		ip_idx = 1;
+	else if (hdr & ICE_FLOW_SEG_HDR_TCP)
+		ip_idx = 2;
+
+	if (hdr & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+		return eh_idx * 3 + ip_idx;
+	else
+		return ICE_HASH_GTPU_CTX_MAX;
+}
+
+/* Pre-hook for programming an RSS rule: pick the per-IP-family GTPU
+ * context table and clear/park conflicting contexts.  For non-GTPU
+ * headers calc_gtpu_ctx_idx() yields ICE_HASH_GTPU_CTX_MAX and the
+ * pre_gtpu handler is a no-op.
+ */
+static int
+ice_add_rss_cfg_pre(struct ice_pf *pf, uint32_t hdr)
+{
+	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu4,
+						gtpu_ctx_idx);
+	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+		return ice_add_rss_cfg_pre_gtpu(pf, &pf->hash_ctx.gtpu6,
+						gtpu_ctx_idx);
+
+	return 0;
+}
+
+/* After a rule with context index 'ctx_idx' has been programmed:
+ * record it in the software context table and restore (moveback) the
+ * contexts that ice_add_rss_cfg_pre_gtpu() parked for this index.
+ * -ENOENT from an empty slot is ignored.
+ */
static int
-ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+ice_add_rss_cfg_post_gtpu(struct ice_pf *pf, struct ice_hash_gtpu_ctx *ctx,
+			  u32 hdr, u64 fld, bool symm, u8 ctx_idx)
{
-	if (hdr & ICE_FLOW_SEG_HDR_GTPU_EH) {
-		if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-		    (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_udp);
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_UDP)) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_udp);
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV4) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4_tcp);
-		} else if ((hdr & ICE_FLOW_SEG_HDR_IPV6) &&
-			   (hdr & ICE_FLOW_SEG_HDR_TCP)) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6_tcp);
-		} else if (hdr & ICE_FLOW_SEG_HDR_IPV4) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv4);
-		} else if (hdr & ICE_FLOW_SEG_HDR_IPV6) {
-			ICE_HASH_CFG_RESET(&pf->gtpu_hash_ctx.ipv6);
-		}
+	int ret;
+
+	if (ctx_idx < ICE_HASH_GTPU_CTX_MAX) {
+		ctx->ctx[ctx_idx].addl_hdrs = hdr;
+		ctx->ctx[ctx_idx].hash_flds = fld;
+		/* NOTE(review): stores the bool 'symm' directly into the
+		 * hash_func field; this relies on the enum values being
+		 * ICE_RSS_HASH_TOEPLITZ for false and
+		 * ICE_RSS_HASH_TOEPLITZ_SYMMETRIC for true — confirm
+		 * against the enum definition (ice_hash_moveback() reads
+		 * it back with that assumption).
+		 */
+		ctx->ctx[ctx_idx].hash_func = symm;
+	}
+
+	switch (ctx_idx) {
+	case ICE_HASH_GTPU_CTX_EH_IP:
+		break;
+	case ICE_HASH_GTPU_CTX_EH_IP_UDP:
+		/* Restore the UP/DW contexts parked by the pre-hook. */
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_EH_IP_TCP:
+		/* Mirror of the EH_IP_UDP case for TCP. */
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_UP_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_DW_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	case ICE_HASH_GTPU_CTX_UP_IP:
+	case ICE_HASH_GTPU_CTX_UP_IP_UDP:
+	case ICE_HASH_GTPU_CTX_UP_IP_TCP:
+	case ICE_HASH_GTPU_CTX_DW_IP:
+	case ICE_HASH_GTPU_CTX_DW_IP_UDP:
+	case ICE_HASH_GTPU_CTX_DW_IP_TCP:
+		/* Restore the EH contexts parked by the pre-hook. */
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_UDP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		ret = ice_hash_moveback(pf,
+					&ctx->ctx[ICE_HASH_GTPU_CTX_EH_IP_TCP]);
+		if (ret && (ret != -ENOENT))
+			return ret;
+
+		break;
+	default:
+		break;
	}
	return 0;
}
+/* Post-hook for programming an RSS rule: dispatch to the per-IP-family
+ * GTPU context table.  Non-GTPU headers resolve to
+ * ICE_HASH_GTPU_CTX_MAX and leave the tables untouched.
+ */
+static int
+ice_add_rss_cfg_post(struct ice_pf *pf, uint32_t hdr, uint64_t fld, bool symm)
+{
+	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu4, hdr,
+						 fld, symm, gtpu_ctx_idx);
+	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+		return ice_add_rss_cfg_post_gtpu(pf, &pf->hash_ctx.gtpu6, hdr,
+						 fld, symm, gtpu_ctx_idx);
+
+	return 0;
+}
+
+/* Post-hook for rule removal: clear the software context slot that
+ * tracked the removed GTPU RSS rule, if the header maps to one.
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_pf *pf, uint32_t hdr)
+{
+	u8 gtpu_ctx_idx = calc_gtpu_ctx_idx(hdr);
+
+	/* Not a GTPU pattern: no context slot to clear. */
+	if (gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+		return;
+
+	if (hdr & ICE_FLOW_SEG_HDR_IPV4)
+		hash_cfg_reset(&pf->hash_ctx.gtpu4.ctx[gtpu_ctx_idx]);
+	else if (hdr & ICE_FLOW_SEG_HDR_IPV6)
+		hash_cfg_reset(&pf->hash_ctx.gtpu6.ctx[gtpu_ctx_idx]);
+}
+
int
ice_rem_rss_cfg_wrap(struct ice_pf *pf, uint16_t vsi_id,
uint64_t fld, uint32_t hdr)
if (ret && ret != ICE_ERR_DOES_NOT_EXIST)
PMD_DRV_LOG(ERR, "remove rss cfg failed\n");
- ret = ice_rem_rss_cfg_post(pf, hdr);
- if (ret)
- PMD_DRV_LOG(ERR, "remove rss cfg post failed\n");
+ ice_rem_rss_cfg_post(pf, hdr);
return 0;
}