struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint64_t tx_offloads = eth_conf->txmode.offloads;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct rte_device *rdev = dev->device;
struct rte_eth_link *link = &dev->data->dev_link;
struct rte_dpaa_device *dpaa_dev;
struct rte_intr_handle *intr_handle;
uint32_t max_rx_pktlen;
int speed, duplex;
- int ret;
+ int ret, rx_status;
PMD_INIT_FUNC_TRACE();
dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
- intr_handle = &dpaa_dev->intr_handle;
+ intr_handle = dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
+ /* Check if interface is enabled in case of shared MAC */
+ if (fif->is_shared_mac) {
+ rx_status = fman_if_get_rx_status(fif);
+ if (!rx_status) {
+ DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+ dpaa_intf->name);
+ return -EHOSTDOWN;
+ }
+ }
+
/* Rx offloads which are enabled by default */
if (dev_rx_offloads_nodis & ~rx_offloads) {
DPAA_PMD_INFO(
}
/* if the interrupts were configured on this device */
- if (intr_handle && intr_handle->fd) {
+ if (intr_handle && rte_intr_fd_get(intr_handle)) {
if (dev->data->dev_conf.intr_conf.lsc != 0)
rte_intr_callback_register(intr_handle,
dpaa_interrupt_handler,
(void *)dev);
- ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
+ ret = dpaa_intr_enable(__fif->node_name,
+ rte_intr_fd_get(intr_handle));
if (ret) {
if (dev->data->dev_conf.intr_conf.lsc != 0) {
rte_intr_callback_unregister(intr_handle,
int bytes_read;
dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
- intr_handle = &dpaa_dev->intr_handle;
+ intr_handle = dpaa_dev->intr_handle;
+
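+ /* Nothing to drain if no eventfd is associated with this handle */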
+ if (rte_intr_fd_get(intr_handle) < 0)
+ return;
- bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
+ bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
+ sizeof(uint64_t));
if (bytes_read < 0)
DPAA_PMD_ERR("Error reading eventfd\n");
dpaa_eth_link_update(dev, 0);
}
dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
- intr_handle = &dpaa_dev->intr_handle;
+ intr_handle = dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
ret = dpaa_eth_dev_stop(dev);
if (link->link_status && !link->link_autoneg)
dpaa_restart_link_autoneg(__fif->node_name);
- if (intr_handle && intr_handle->fd &&
+ if (intr_handle && rte_intr_fd_get(intr_handle) &&
dev->data->dev_conf.intr_conf.lsc != 0) {
dpaa_intr_disable(__fif->node_name);
rte_intr_callback_unregister(intr_handle,
}
/* release configuration memory */
- if (dpaa_intf->fc_conf)
- rte_free(dpaa_intf->fc_conf);
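+ /* rte_free() ignores a NULL pointer, so no explicit check is needed */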
+ rte_free(dpaa_intf->fc_conf);
/* Release RX congestion Groups */
if (dpaa_intf->cgr_rx) {
QM_FQCTRL_CTXASTASHING |
QM_FQCTRL_PREFERINCACHE;
opts.fqd.context_a.stashing.exclusive = 0;
- /* In muticore scenario stashing becomes a bottleneck on LS1046.
+ /* In multicore scenario stashing becomes a bottleneck on LS1046.
* So do not enable stashing in this case
*/
if (dpaa_svr_family != SVR_LS1046A_FAMILY)
rxq->qp = qp;
/* Set up the device interrupt handler */
- if (!dev->intr_handle) {
+ if (dev->intr_handle == NULL) {
struct rte_dpaa_device *dpaa_dev;
struct rte_device *rdev = dev->device;
dpaa_dev = container_of(rdev, struct rte_dpaa_device,
device);
- dev->intr_handle = &dpaa_dev->intr_handle;
- dev->intr_handle->intr_vec = rte_zmalloc(NULL,
- dpaa_push_mode_max_queue, 0);
- if (!dev->intr_handle->intr_vec) {
+ dev->intr_handle = dpaa_dev->intr_handle;
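+ /* Allocate the Rx interrupt vector list via the rte_intr_* accessors
+ * instead of touching the handle members directly
+ */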
+ if (rte_intr_vec_list_alloc(dev->intr_handle,
+ NULL, dpaa_push_mode_max_queue)) {
DPAA_PMD_ERR("intr_vec alloc failed");
return -ENOMEM;
}
- dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
- dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+ if (rte_intr_nb_efd_set(dev->intr_handle,
+ dpaa_push_mode_max_queue))
+ return -rte_errno;
+
+ if (rte_intr_max_intr_set(dev->intr_handle,
+ dpaa_push_mode_max_queue))
+ return -rte_errno;
}
- dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
- dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
- dev->intr_handle->efds[queue_idx] = q_fd;
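+ /* Record the handle type, queue-to-vector mapping and per-queue
+ * eventfd through the accessor API
+ */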
+ if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
+ if (rte_intr_vec_list_index_set(dev->intr_handle,
+ queue_idx, queue_idx + 1))
+ return -rte_errno;
+
+ if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
+ q_fd))
+ return -rte_errno;
+
rxq->q_fd = q_fd;
}
rxq->bp_array = rte_dpaa_bpid_info;
dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id)
{
- struct qm_mcc_initfq opts;
+ struct qm_mcc_initfq opts = {0};
int ret;
u32 flags = 0;
struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
- dpaa_poll_queue_default_config(&opts);
-
- if (dpaa_intf->cgr_rx) {
- opts.we_mask |= QM_INITFQ_WE_CGID;
- opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
- opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
- }
-
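+ /* A scheduled frame queue must be retired and placed out of service
+ * before qman_init_fq() can reconfigure it
+ */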
+ qman_retire_fq(rxq, NULL);
+ qman_oos_fq(rxq);
ret = qman_init_fq(rxq, flags, &opts);
if (ret) {
- DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+ DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
rxq->fqid, ret);
}
/* no tx-confirmation */
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+ if (fman_ip_rev >= FMAN_V3) {
+ /* Set B0V bit in contextA to set ASPID to 0 */
+ opts.fqd.context_a.hi |= 0x04000000;
+ }
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
if (cgr_tx) {
dpaa_intf->name = dpaa_device->name;
- /* save fman_if & cfg in the interface struture */
+ /* save fman_if & cfg in the interface structure */
eth_dev->process_private = fman_intf;
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
dpaa_push_mode_max_queue = 0;
- /* if push mode queues to be enabled. Currenly we are allowing
+ /* if push mode queues are to be enabled. Currently we are allowing
* only one queue per thread.
*/
if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {