dev->data->scattered_rx = 1;
}
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_config(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
+ dpaa_write_fm_config_to_file();
+ DPAA_PMD_ERR("FM port configuration: Failed\n");
+ return -1;
+ }
+ dpaa_write_fm_config_to_file();
+ }
+
/* if the interrupts were configured on this devices*/
if (intr_handle && intr_handle->fd) {
if (dev->data->dev_conf.intr_conf.lsc != 0)
if (bytes_read < 0)
DPAA_PMD_ERR("Error reading eventfd\n");
dpaa_eth_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ if (!(default_q || fmc_q))
+ dpaa_write_fm_config_to_file();
+
/* Change tx callback to the real one */
if (dpaa_intf->cgr_tx)
dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
return ret;
}
+/*
+ * Update the RSS hash-function configuration for the port.
+ *
+ * Only supported in FMCLESS mode (neither default-queue nor FMC-configured
+ * operation); in the other modes the FM port cannot be reprogrammed here.
+ *
+ * @param dev       ethdev whose FM port is reconfigured
+ * @param rss_conf  requested RSS config; only rss_hf is honoured
+ * @return 0 on success, -1 if FM port programming fails,
+ *         -ENOTSUP when the mode does not allow runtime RSS updates.
+ * NOTE(review): rss_key is presumably ignored because dpaa has no key
+ * support (see the matching conf_get) — confirm with hardware docs.
+ */
+static int
+dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
+			 struct rte_eth_rss_conf *rss_conf)
+{
+	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Runtime RSS reprogramming only exists in FMCLESS mode. */
+	if (default_q || fmc_q) {
+		DPAA_PMD_ERR("Function not supported\n");
+		return -ENOTSUP;
+	}
+
+	if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
+		DPAA_PMD_ERR("FM port configuration: Failed\n");
+		return -1;
+	}
+	/* Persist the accepted hash set so conf_get reports it back. */
+	dev_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+
+	return 0;
+}
+
+/*
+ * Report the current RSS configuration of the port.
+ *
+ * dpaa has no programmable RSS key, so rss_key_len is always reported
+ * as 0; only the stored hash-field set (rss_hf) is returned.
+ *
+ * @param dev       ethdev queried
+ * @param rss_conf  filled with rss_key_len = 0 and the stored rss_hf
+ * @return always 0.
+ */
+static int
+dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+			   struct rte_eth_rss_conf *rss_conf)
+{
+	const struct rte_eth_conf *conf = &dev->data->dev_conf;
+
+	/* dpaa does not support rss_key, so length should be 0*/
+	rss_conf->rss_key_len = 0;
+	rss_conf->rss_hf = conf->rx_adv_conf.rss_conf.rss_hf;
+	return 0;
+}
+
static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id)
{
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
- .rx_queue_count = dpaa_dev_rx_queue_count,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
.rxq_info_get = dpaa_rxq_info_get,
.rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
.rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
+ .rss_hash_update = dpaa_dev_rss_hash_update,
+ .rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
};
static bool
}
int
-rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *dev;
if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
} else if (fmc_q) {
- num_rx_fqs = 1;
+ num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
+ dev_vspids,
+ DPAA_MAX_NUM_PCD_QUEUES);
+ if (num_rx_fqs < 0) {
+ DPAA_PMD_ERR("%s FMC initializes failed!",
+ dpaa_intf->name);
+ goto free_rx;
+ }
+ if (!num_rx_fqs) {
+ DPAA_PMD_WARN("%s is not configured by FMC.",
+ dpaa_intf->name);
+ }
} else {
/* FMCLESS mode, load balance to multiple cores.*/
num_rx_fqs = rte_lcore_count();
/* Populate ethdev structure */
eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;