diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 16d98c6..810568b 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -272,10 +272,6 @@ static const struct eth_dev_ops eth_igc_ops = {
 
        .rx_queue_setup         = eth_igc_rx_queue_setup,
        .rx_queue_release       = eth_igc_rx_queue_release,
-       .rx_queue_count         = eth_igc_rx_queue_count,
-       .rx_descriptor_done     = eth_igc_rx_descriptor_done,
-       .rx_descriptor_status   = eth_igc_rx_descriptor_status,
-       .tx_descriptor_status   = eth_igc_tx_descriptor_status,
        .tx_queue_setup         = eth_igc_tx_queue_setup,
        .tx_queue_release       = eth_igc_tx_queue_release,
        .tx_done_cleanup        = eth_igc_tx_done_cleanup,
@@ -544,8 +540,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                               NULL);
+               rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 }
 
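The hunk above drops the leading underscore from rte_eth_dev_callback_process(), the helper a PMD calls to deliver RTE_ETH_EVENT_INTR_LSC notifications to applications. For context, a minimal application-side sketch of consuming that event; the handler name and the printout are illustrative assumptions, not part of this patch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Hypothetical handler invoked by rte_eth_dev_callback_process()
     * when the PMD raises a link-status-change event. */
    int
    lsc_event_cb(uint16_t port_id, enum rte_eth_event_type event,
                 void *cb_arg, void *ret_param)
    {
            struct rte_eth_link link;

            (void)cb_arg;
            (void)ret_param;
            if (event == RTE_ETH_EVENT_INTR_LSC &&
                rte_eth_link_get_nowait(port_id, &link) == 0)
                    printf("port %u link %s\n", (unsigned int)port_id,
                           link.link_status ? "up" : "down");
            return 0;
    }

The application would register it before starting the port, e.g. rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, lsc_event_cb, NULL).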
@@ -1227,6 +1222,10 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        dev->dev_ops = &eth_igc_ops;
+       dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
+       dev->rx_queue_count = eth_igc_rx_queue_count;
+       dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+       dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
 
        /*
         * for secondary processes, we don't initialize any further as primary
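The removal of rx_queue_count, rx_descriptor_done, rx_descriptor_status and tx_descriptor_status from eth_igc_ops in the first hunk pairs with this one: the handlers become per-device fast-path pointers filled in at init time, and applications keep reaching them through the inline ethdev wrappers. A minimal caller-side sketch, with port, queue and offset values left as arbitrary parameters:

    #include <rte_ethdev.h>

    /* Query the Rx descriptor 'offset' entries ahead of the next one to be
     * received; this dispatches through the dev->rx_descriptor_status
     * pointer that eth_igc_dev_init() now sets directly. */
    int
    rx_queue_snapshot(uint16_t port_id, uint16_t queue_id, uint16_t offset)
    {
            int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);
            int used = rte_eth_rx_queue_count(port_id, queue_id);

            if (status == RTE_ETH_RX_DESC_DONE) {
                    /* packet ready but not yet returned by rte_eth_rx_burst() */
            }
            /* 'used' is the number of descriptors currently holding packets,
             * dispatched through dev->rx_queue_count; negative on error. */
            return used;
    }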
@@ -2266,6 +2265,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
        /* set redirection table */
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
                union igc_rss_reta_reg reta, reg;
@@ -2278,7 +2279,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                                IGC_RSS_RDT_REG_SIZE_MASK);
 
                /* if no need to update the register */
-               if (!mask)
+               if (!mask ||
+                   shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
                        continue;
 
                /* check mask whether need to read the register value first */
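The RETA hunks in eth_igc_rss_reta_update() and eth_igc_rss_reta_query() add two safeguards: RTE_BUILD_BUG_ON() asserts at compile time that the 128-entry table splits evenly into 4-byte RETA registers (and that reta.bytes matches that register width), while the extended condition skips any register whose four entries would run past the 64-entry rte_eth_rss_reta_entry64 group addressed by idx/shift. A standalone sketch of that index arithmetic, with stand-in constants mirroring the driver's macros (assumed values, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    #define RETA_GROUP_SIZE   64    /* stand-in for RTE_RETA_GROUP_SIZE */
    #define RDT_REG_SIZE      4     /* stand-in for IGC_RSS_RDT_REG_SIZE */
    #define RDT_REG_SIZE_MASK 0xf   /* stand-in for IGC_RSS_RDT_REG_SIZE_MASK */
    #define RETA_SIZE_128     128   /* stand-in for ETH_RSS_RETA_SIZE_128 */

    int
    main(void)
    {
            uint64_t group_mask = UINT64_MAX; /* pretend every entry is selected */
            unsigned int i;

            for (i = 0; i < RETA_SIZE_128; i += RDT_REG_SIZE) {
                    unsigned int idx = i / RETA_GROUP_SIZE;   /* which 64-entry group */
                    unsigned int shift = i % RETA_GROUP_SIZE; /* offset inside the group */
                    unsigned int mask = (group_mask >> shift) & RDT_REG_SIZE_MASK;

                    /* same guard as the patch: nothing selected, or the four
                     * entries would cross the end of the 64-entry group */
                    if (!mask || shift > (RETA_GROUP_SIZE - RDT_REG_SIZE))
                            continue;

                    printf("RETA reg %2u <- group %u, entries %u..%u, mask 0x%x\n",
                           i / RDT_REG_SIZE, idx, shift, shift + RDT_REG_SIZE - 1, mask);
            }
            return 0;
    }

Because the 128-entry table is a multiple of the 4-byte register size (which the new RTE_BUILD_BUG_ON enforces), shift here never exceeds 60, so the extra shift check acts as a defensive bound rather than changing behaviour for valid inputs.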
@@ -2289,6 +2291,7 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                                        IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
 
                /* update the register */
+               RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
                for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
                        if (mask & (1u << j))
                                reta.bytes[j] =
@@ -2318,6 +2321,8 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
        /* read redirection table */
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
                union igc_rss_reta_reg reta;
@@ -2330,10 +2335,12 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
                                IGC_RSS_RDT_REG_SIZE_MASK);
 
                /* if no need to read register */
-               if (!mask)
+               if (!mask ||
+                   shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
                        continue;
 
                /* read register and get the queue index */
+               RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
                reta.dword = IGC_READ_REG_LE_VALUE(hw,
                                IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
                for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {