diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 4b3eb7f5c9..8b803b8542 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -32,6 +32,7 @@
 #define DRIVER_LOOPBACK_MODE "drv_loopback"
 #define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
 #define DRIVER_TX_CONF "drv_tx_conf"
+#define DRIVER_ERROR_QUEUE "drv_err_queue"
 #define CHECK_INTERVAL		100  /* 100ms */
 #define MAX_REPEAT_TIME		90   /* 9s (90 * 100ms) in total */
 
@@ -71,6 +72,9 @@ bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
 uint64_t dpaa2_timestamp_rx_dynflag;
 int dpaa2_timestamp_dynfield_offset = -1;
 
+/* Enable error queue */
+bool dpaa2_enable_err_queue;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -95,10 +99,6 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
 	{"cgr_reject_bytes", 4, 1},
 };
 
-static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
-	RTE_ETH_FILTER_GET
-};
-
 static struct rte_dpaa2_driver rte_dpaa2_pmd;
 static int dpaa2_dev_link_update(struct rte_eth_dev *dev,
 				 int wait_to_complete);
@@ -226,9 +226,11 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
 		       mc_ver_info.major,
 		       mc_ver_info.minor,
 		       mc_ver_info.revision);
+	if (ret < 0)
+		return -EINVAL;
 
 	ret += 1; /* add the size of '\0' */
-	if (fw_size < (uint32_t)ret)
+	if (fw_size < (size_t)ret)
 		return ret;
 	else
 		return 0;
@@ -391,6 +393,27 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
 			goto fail;
 	}
 
+	if (dpaa2_enable_err_queue) {
+		priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
+			sizeof(struct dpaa2_queue), 0);
+		if (!priv->rx_err_vq)
+			goto fail;
+
+		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+		dpaa2_q->q_storage = rte_malloc("err_dq_storage",
+			sizeof(struct queue_storage_info_t) *
+			RTE_MAX_LCORE,
+			RTE_CACHE_LINE_SIZE);
+		if (!dpaa2_q->q_storage)
+			goto fail;
+
+		memset(dpaa2_q->q_storage, 0,
+			sizeof(struct queue_storage_info_t) * RTE_MAX_LCORE);
+		for (i = 0; i < RTE_MAX_LCORE; i++)
+			if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i]))
+				goto fail;
+	}
+
 	for (i = 0; i < priv->nb_tx_queues; i++) {
 		mc_q->eth_data = dev->data;
 		mc_q->flow_id = 0xffff;
@@ -458,6 +481,14 @@ fail:
 		rte_free(dpaa2_q->q_storage);
 		priv->rx_vq[i--] = NULL;
 	}
+
+	if (dpaa2_enable_err_queue && priv->rx_err_vq) {
+		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+		if (dpaa2_q->q_storage)
+			dpaa2_free_dq_storage(dpaa2_q->q_storage);
+		rte_free(dpaa2_q->q_storage);
+	}
+
 	rte_free(mc_q);
 	return -1;
 }
@@ -891,9 +922,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
 		cong_notif_cfg.threshold_entry = nb_tx_desc;
 		/* Notify that the queue is not congested when the data in
-		 * the queue is below this thershold.
+		 * the queue is below this threshold (90% of the value).
 		 */
-		cong_notif_cfg.threshold_exit = nb_tx_desc - 24;
+		cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
 		cong_notif_cfg.message_ctx = 0;
 		cong_notif_cfg.message_iova =
 				(size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn);
@@ -1163,11 +1194,31 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 		dpaa2_q->fqid = qid.fqid;
 	}
 
-	/*checksum errors, send them to normal path and set it in annotation */
-	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
-	err_cfg.errors |= DPNI_ERROR_PHE;
+	if (dpaa2_enable_err_queue) {
+		ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+				     DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid);
+		if (ret) {
+			DPAA2_PMD_ERR("Error getting rx err flow information: err=%d",
+				      ret);
+			return ret;
+		}
+		dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
+		dpaa2_q->fqid = qid.fqid;
+		dpaa2_q->eth_data = dev->data;
 
-	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+		err_cfg.errors = DPNI_ERROR_DISC;
+		err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+	} else {
+		/* checksum errors: send them to the normal path
+		 * and set the flag in the annotation
+		 */
+		err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
+
+		/* if packets with parse errors are not to be dropped */
+		err_cfg.errors |= DPNI_ERROR_PHE;
+
+		err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+	}
 	err_cfg.set_frame_annotation = true;
 
 	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
@@ -2271,45 +2322,15 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
 	return ret;
 }
 
-static inline int
-dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op)
-{
-	unsigned int i;
-
-	for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) {
-		if (dpaa2_supported_filter_ops[i] == filter_op)
-			return 0;
-	}
-	return -ENOTSUP;
-}
-
 static int
-dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
-		    enum rte_filter_type filter_type,
-		    enum rte_filter_op filter_op,
-		    void *arg)
+dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev,
+		       const struct rte_flow_ops **ops)
 {
-	int ret = 0;
-
 	if (!dev)
 		return -ENODEV;
 
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
-			ret = -ENOTSUP;
-			break;
-		}
-		*(const void **)arg = &dpaa2_flow_ops;
-		dpaa2_filter_type |= filter_type;
-		break;
-	default:
-		RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
-			filter_type);
-		ret = -ENOTSUP;
-		break;
-	}
-	return ret;
+	*ops = &dpaa2_flow_ops;
+	return 0;
 }
 
 static void
@@ -2317,12 +2338,18 @@ dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	struct rte_eth_rxq_info *qinfo)
 {
 	struct dpaa2_queue *rxq;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+	uint16_t max_frame_length;
 
 	rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
 
 	qinfo->mp = rxq->mb_pool;
 	qinfo->scattered_rx = dev->data->scattered_rx;
 	qinfo->nb_desc = rxq->nb_desc;
+	if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+				&max_frame_length) == 0)
+		qinfo->rx_buf_size = max_frame_length;
 
 	qinfo->conf.rx_free_thresh = 1;
 	qinfo->conf.rx_drop_en = 1;
@@ -2396,7 +2423,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
-	.filter_ctrl          = dpaa2_dev_flow_ctrl,
+	.flow_ops_get         = dpaa2_dev_flow_ops_get,
 	.rxq_info_get         = dpaa2_rxq_info_get,
 	.txq_info_get         = dpaa2_txq_info_get,
 	.tm_ops_get           = dpaa2_tm_ops_get,
@@ -2624,6 +2651,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
DPAA2_PMD_INFO("TX_CONF Enabled"); } + if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) { + dpaa2_enable_err_queue = 1; + DPAA2_PMD_INFO("Enable error queue"); + } + /* Allocate memory for hardware structure for queues */ ret = dpaa2_alloc_rx_tx_queues(eth_dev); if (ret) { @@ -2863,5 +2893,6 @@ RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2, DRIVER_LOOPBACK_MODE "= " DRIVER_NO_PREFETCH_MODE "=" - DRIVER_TX_CONF "="); -RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE); + DRIVER_TX_CONF "=" + DRIVER_ERROR_QUEUE "="); +RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);