const struct rte_ether_addr *mac;
int err;
- parent_adapter->eth_dev = eth_dev;
parent_adapter->pf.adapter = parent_adapter;
parent_adapter->pf.dev_data = eth_dev->data;
/* create a dummy main_vsi */
intr_handle = &pci_dev->intr_handle;
pf->adapter = ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- pf->adapter->eth_dev = dev;
pf->dev_data = dev->data;
hw->back = pf->adapter;
hw->hw_addr = (uint8_t *)pci_dev->mem_resource[0].addr;
void
ice_vsi_disable_queues_intr(struct ice_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
{
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi = pf->main_vsi;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
struct ice_aq_get_set_rss_lut_params lut_params;
struct rte_eth_rss_conf *rss_conf;
struct ice_aqc_get_set_rss_keys key;
bool is_safe_mode = pf->adapter->is_safe_mode;
uint32_t reg;
- rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
- nb_q = dev->data->nb_rx_queues;
+ rss_conf = &dev_data->dev_conf.rx_adv_conf.rss_conf;
+ nb_q = dev_data->nb_rx_queues;
vsi->rss_key_size = ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE;
vsi->rss_lut_size = pf->hash_lut_size;
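Note the pattern in this hunk: ice_init_rss() only reads configuration, and everything it touches (dev_conf, nb_rx_queues) lives in rte_eth_dev_data, which is allocated in shared memory. A short sketch of the access that makes the eth_dev pointer unnecessary here (hypothetical helper, assuming the pf->dev_data assignment from probe above):

/* Hypothetical sketch: configuration reads go through the shared
 * rte_eth_dev_data stored in pf->dev_data at probe time, so they are
 * safe in primary and secondary processes alike. */
static inline uint16_t
ice_pf_nb_rx_queues(const struct ice_pf *pf)
{
	return pf->dev_data->nb_rx_queues;
}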
void
ice_vsi_queues_bind_intr(struct ice_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
void
ice_vsi_enable_queues_intr(struct ice_vsi *vsi)
{
- struct rte_eth_dev *dev = vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[vsi->adapter->pf.dev_data->port_id];
struct rte_pci_device *pci_dev = ICE_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
struct ice_adapter {
/* Common for both PF and VF */
struct ice_hw hw;
- struct rte_eth_dev *eth_dev;
struct ice_pf pf;
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
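With eth_dev dropped from struct ice_adapter, the only per-port state the adapter keeps is pf.dev_data, which lives in shared memory. Wherever a real rte_eth_dev is still required (the interrupt wiring above, the burst-function checks below), it is recovered by port id. A minimal sketch of that lookup, assuming rte_eth_devices[] is exported through rte_ethdev.h in this DPDK release; the helper name is hypothetical:

#include <rte_ethdev.h>

/* Hypothetical helper mirroring the lookup this patch repeats at each
 * call site: dev_data is shared across processes, while the
 * rte_eth_devices[] entry (and its burst/ops function pointers) is
 * local to the process that performs the lookup, so the returned
 * pointer is always valid in that process. */
static inline struct rte_eth_dev *
ice_adapter_to_eth_dev(struct ice_adapter *ad)
{
	return &rte_eth_devices[ad->pf.dev_data->port_id];
}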
(&(((struct ice_vsi *)vsi)->adapter->hw))
#define ICE_VSI_TO_PF(vsi) \
(&(((struct ice_vsi *)vsi)->adapter->pf))
-#define ICE_VSI_TO_ETH_DEV(vsi) \
- (((struct ice_vsi *)vsi)->adapter->eth_dev)
/* ICE_PF_TO */
#define ICE_PF_TO_HW(pf) \
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
struct ice_fdir_info *fdir_info = &pf->fdir;
char fdir_hash_name[RTE_HASH_NAMESIZE];
int ret;
static int
ice_fdir_setup(struct ice_pf *pf)
{
- struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
struct ice_hw *hw = ICE_PF_TO_HW(pf);
const struct rte_memzone *mz = NULL;
char z_name[RTE_MEMZONE_NAMESIZE];
static void
ice_fdir_teardown(struct ice_pf *pf)
{
- struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
struct ice_hw *hw = ICE_PF_TO_HW(pf);
struct ice_vsi *vsi;
int err;
struct ice_vsi *vsi = rxq->vsi;
struct ice_hw *hw = ICE_VSI_TO_HW(vsi);
struct ice_pf *pf = ICE_VSI_TO_PF(vsi);
- struct rte_eth_dev *dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size, len;
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ dev_data->dev_conf.rxmode.max_rx_pkt_len);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
/* Check if scattered RX needs to be used. */
if (rxq->max_pkt_len > buf_size)
- dev->data->scattered_rx = 1;
+ dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
{
struct ice_rx_queue *rxq = (struct ice_rx_queue *)rx_queue;
uint16_t nb_rx = 0;
- struct rte_eth_dev *dev;
if (!nb_pkts)
return 0;
if (ice_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed +=
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed +=
rxq->rx_free_thresh;
PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
"port_id=%u, queue_id=%u",
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable*/
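Both receive paths account allocation failures the same way; a sketch of the shared-memory form used above, with a hypothetical helper name and assuming the ice_rx_queue/ice_vsi back-pointers shown:

/* Hypothetical helper: the failure counter hangs off dev_data in
 * shared memory, so the hot path never dereferences a per-process
 * rte_eth_dev. */
static inline void
ice_rxq_count_alloc_failed(struct ice_rx_queue *rxq, uint16_t n)
{
	rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed += n;
}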
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->dev_data->port_id];
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("ice fdir tx queue",
return -EINVAL;
}
- dev = pf->adapter->eth_dev;
+ dev = &rte_eth_devices[pf->dev_data->port_id];
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("ice fdir rx queue",
uint64_t dma_addr;
uint64_t pkt_flags;
uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
- struct rte_eth_dev *dev;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
/* allocate mbuf */
nmb = rte_mbuf_raw_alloc(rxq->mp);
if (unlikely(!nmb)) {
- dev = ICE_VSI_TO_ETH_DEV(rxq->vsi);
- dev->data->rx_mbuf_alloc_failed++;
+ rxq->vsi->adapter->pf.dev_data->rx_mbuf_alloc_failed++;
break;
}
rxd = *rxdp; /* copy descriptor in ring to temp variable*/
#define FDIR_PARSING_ENABLE_PER_QUEUE(ad, on) do { \
int i; \
- for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
- struct ice_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+ for (i = 0; i < (ad)->pf.dev_data->nb_rx_queues; i++) { \
+ struct ice_rx_queue *rxq = (ad)->pf.dev_data->rx_queues[i]; \
if (!rxq) \
continue; \
rxq->fdir_enabled = on; \
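The per-queue FDIR toggle now walks the Rx queue array through dev_data as well. For reference, a hedged sketch of a wrapper invoking it; the wrapper name is hypothetical and the macro is assumed to close with a do/while as usual:

/* Hypothetical wrapper: flip FDIR mark parsing for every configured
 * Rx queue of the port backing this adapter. */
static inline void
ice_fdir_rx_parsing_set(struct ice_adapter *ad, bool on)
{
	FDIR_PARSING_ENABLE_PER_QUEUE(ad, on);
}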
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh7 =
i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
#ifdef CC_AVX512_SUPPORT
- struct rte_eth_dev *dev = txq->vsi->adapter->eth_dev;
+ struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
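This is the one datapath spot where dev_data alone is not enough: burst function pointers are per process, so the comparison has to use the eth_dev looked up in the calling process. A sketch of the check as a predicate, with a hypothetical helper name:

/* Hypothetical predicate: tx_pkt_burst differs between primary and
 * secondary processes, hence the process-local rte_eth_devices[]
 * lookup rather than anything reachable from shared dev_data. */
static inline bool
ice_txq_uses_avx512(const struct ice_tx_queue *txq)
{
	const struct rte_eth_dev *dev =
		&rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];

	return dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
	       dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload;
}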
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
struct ice_adv_rule_info *rule_info)
{
struct ice_vsi *vsi = pf->main_vsi;
- struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct rte_eth_dev_data *dev_data = pf->dev_data;
const struct rte_flow_action_queue *act_q;
const struct rte_flow_action_rss *act_qgrop;
uint16_t base_queue, i;
goto error;
if ((act_qgrop->queue[0] +
act_qgrop->queue_num) >
- dev->data->nb_rx_queues)
+ dev_data->nb_rx_queues)
goto error1;
for (i = 0; i < act_qgrop->queue_num - 1; i++)
if (act_qgrop->queue[i + 1] !=
break;
case RTE_FLOW_ACTION_TYPE_QUEUE:
act_q = action->conf;
- if (act_q->index >= dev->data->nb_rx_queues)
+ if (act_q->index >= dev_data->nb_rx_queues)
goto error;
rule_info->sw_act.fltr_act =
ICE_FWD_TO_Q;
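The switch-filter action parsing now bounds-checks queue actions against dev_data directly. A minimal sketch of the QUEUE-action check in isolation, mirroring the logic above; the helper name is hypothetical:

/* Hypothetical helper: a QUEUE action must target an Rx queue that
 * the port has actually configured. */
static inline int
ice_validate_queue_action(const struct rte_eth_dev_data *dev_data,
			  const struct rte_flow_action_queue *act_q)
{
	return act_q->index < dev_data->nb_rx_queues ? 0 : -EINVAL;
}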