sfc_log_init(sa, "entry");
- dev_info->min_mtu = ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
dev_info->max_mtu = EFX_MAC_SDU_MAX;
dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
/*
* The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
- if (mtu > ETHER_MAX_LEN) {
+ if (mtu > RTE_ETHER_MAX_LEN) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
return sap->dp_rx->pool_ops_supported(pool);
}
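+/* Enable Rx interrupts on the given queue via the datapath-specific handler */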
+static int
+sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ struct sfc_rxq_info *rxq_info;
+
+ SFC_ASSERT(queue_id < sas->rxq_count);
+ rxq_info = &sas->rxq_info[queue_id];
+
+ return sap->dp_rx->intr_enable(rxq_info->dp);
+}
+
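+/* Disable Rx interrupts on the given queue via the datapath-specific handler */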
+static int
+sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
+ struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
+ struct sfc_rxq_info *rxq_info;
+
+ SFC_ASSERT(queue_id < sas->rxq_count);
+ rxq_info = &sas->rxq_info[queue_id];
+
+ return sap->dp_rx->intr_disable(rxq_info->dp);
+}
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
.rx_descriptor_done = sfc_rx_descriptor_done,
.rx_descriptor_status = sfc_rx_descriptor_status,
.tx_descriptor_status = sfc_tx_descriptor_status,
+ .rx_queue_intr_enable = sfc_rx_queue_intr_enable,
+ .rx_queue_intr_disable = sfc_rx_queue_intr_disable,
.tx_queue_setup = sfc_tx_queue_setup,
.tx_queue_release = sfc_tx_queue_release,
.flow_ctrl_get = sfc_flow_ctrl_get,
sfc_log_init(sa, "entry");
- dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
if (dev->data->mac_addrs == NULL) {
rc = ENOMEM;
goto fail_mac_addrs;