X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=e037bdb097ba5693410b06c9723261b876a4f22a;hb=e31d4d214e64af6e7d1dbc1805ea31022fab1b6f;hp=61ce0623ccee9f721868e80e3e34a1befa940d4c;hpb=89c2ea8f540865c30603d79b53fa56cffb4ce87f;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 61ce0623cc..e037bdb097 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -48,6 +48,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "dpaa2_ethdev.h"
 
@@ -62,9 +63,22 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
 	dev_info->if_index = priv->hw_id;
 
+	dev_info->max_mac_addrs = priv->max_mac_filters;
+	dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
+	dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
 	dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
-
+	dev_info->rx_offload_capa =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM |
+		DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+	dev_info->tx_offload_capa =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM |
+		DEV_TX_OFFLOAD_SCTP_CKSUM |
+		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 	dev_info->speed_capa = ETH_LINK_SPEED_1G |
 			ETH_LINK_SPEED_2_5G |
 			ETH_LINK_SPEED_10G;
@@ -186,6 +200,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct dpni_queue cfg;
 	uint8_t options = 0;
 	uint8_t flow_id;
+	uint32_t bpid;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -193,6 +208,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
 		     dev, rx_queue_id, mb_pool, rx_conf);
 
+	if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+		bpid = mempool_to_bpid(mb_pool);
+		ret = dpaa2_attach_bp_list(priv,
+					   rte_dpaa2_bpid_info[bpid].bp_list);
+		if (ret)
+			return ret;
+	}
 	dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
 	dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
 
@@ -240,8 +262,13 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
 	memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
 
-	tc_id = 0;
-	flow_id = tx_queue_id;
+	if (priv->num_tc == 1) {
+		tc_id = 0;
+		flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
+	} else {
+		tc_id = tx_queue_id;
+		flow_id = 0;
+	}
 
 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
 			     tc_id, flow_id, options, &tx_flow_cfg);
@@ -290,6 +317,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 	struct dpaa2_dev_priv *priv = data->dev_private;
 	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
 	struct dpni_queue cfg;
+	struct dpni_error_cfg err_cfg;
 	uint16_t qdid;
 	struct dpni_queue_id qid;
 	struct dpaa2_queue *dpaa2_q;
@@ -325,6 +353,48 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 		dpaa2_q->fqid = qid.fqid;
 	}
 
+	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+			       DPNI_OFF_RX_L3_CSUM, true);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error to set RX l3 csum: Error = %d\n", ret);
+		return ret;
+	}
+
+	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+			       DPNI_OFF_RX_L4_CSUM, true);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error to set RX l4 csum: Error = %d\n", ret);
+		return ret;
+	}
+
+	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+			       DPNI_OFF_TX_L3_CSUM, true);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error to set TX l3 csum: Error = %d\n", ret);
+		return ret;
+	}
+
+	ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+			       DPNI_OFF_TX_L4_CSUM, true);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error to set TX l4 csum: Error = %d\n", ret);
+		return ret;
+	}
+
+	/* On checksum errors, send frames to the normal path and set it in annotation */
+	err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
+
+	err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+	err_cfg.set_frame_annotation = true;
+
+	ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
+				       priv->token, &err_cfg);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error in dpni_set_errors_behavior:"
+			     " code = %d\n", ret);
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -367,12 +437,87 @@ dpaa2_dev_close(struct rte_eth_dev *dev)
 	}
 }
 
+static void
+dpaa2_dev_promiscuous_enable(
+		struct rte_eth_dev *dev)
+{
+	int ret;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dpni == NULL) {
+		RTE_LOG(ERR, PMD, "dpni is NULL");
+		return;
+	}
+
+	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+	if (ret < 0)
+		RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
+}
+
+static void
+dpaa2_dev_promiscuous_disable(
+		struct rte_eth_dev *dev)
+{
+	int ret;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dpni == NULL) {
+		RTE_LOG(ERR, PMD, "dpni is NULL");
+		return;
+	}
+
+	ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+	if (ret < 0)
+		RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
+}
+
+static int
+dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	int ret;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (dpni == NULL) {
+		RTE_LOG(ERR, PMD, "dpni is NULL");
+		return -EINVAL;
+	}
+
+	/* Check that the MTU is within the allowed range */
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
+		return -EINVAL;
+
+	/* Set the Max Rx frame length as 'mtu' +
+	 * Maximum Ethernet header length
+	 */
+	ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+					mtu + ETH_VLAN_HLEN);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Setting the max frame length failed");
+		return -1;
+	}
+	PMD_DRV_LOG(INFO, "MTU configured for the device: %d\n", mtu);
+	return 0;
+}
+
 static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.dev_configure       = dpaa2_eth_dev_configure,
 	.dev_start           = dpaa2_dev_start,
 	.dev_stop            = dpaa2_dev_stop,
 	.dev_close           = dpaa2_dev_close,
+	.promiscuous_enable  = dpaa2_dev_promiscuous_enable,
+	.promiscuous_disable = dpaa2_dev_promiscuous_disable,
 	.dev_infos_get       = dpaa2_dev_info_get,
+	.mtu_set             = dpaa2_dev_mtu_set,
 	.rx_queue_setup      = dpaa2_dev_rx_queue_setup,
 	.rx_queue_release    = dpaa2_dev_rx_queue_release,
 	.tx_queue_setup      = dpaa2_dev_tx_queue_setup,
@@ -387,7 +532,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	struct fsl_mc_io *dpni_dev;
 	struct dpni_attr attr;
 	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+	struct dpni_buffer_layout layout;
 	int i, ret, hw_id;
+	int tot_size;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -439,10 +586,19 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	 */
 	priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
 
-	priv->nb_tx_queues = attr.num_queues;
+	if (attr.num_tcs == 1)
+		priv->nb_tx_queues = attr.num_queues;
+	else
+		priv->nb_tx_queues = attr.num_tcs;
+
+	PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
+	PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
 
 	priv->hw = dpni_dev;
 	priv->hw_id = hw_id;
+	priv->options = attr.options;
+	priv->max_mac_filters = attr.mac_filter_entries;
+	priv->max_vlan_filters = attr.vlan_filter_entries;
 	priv->flags = 0;
 
 	/* Allocate memory for hardware structure for queues */
@@ -452,6 +608,74 @@
 		return -ret;
 	}
 
+	/* Allocate memory for storing MAC addresses */
+	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
+		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+	if (eth_dev->data->mac_addrs == NULL) {
+		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+			     "store MAC addresses",
+			     ETHER_ADDR_LEN * attr.mac_filter_entries);
+		return -ENOMEM;
+	}
+
+	ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+					priv->token,
+			(uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
+	if (ret) {
+		PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
+			     " Error Code = %d\n", ret);
+		return -ret;
+	}
+
+	/* ... rx buffer layout ... */
+	tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
+	tot_size = RTE_ALIGN_CEIL(tot_size,
+				  DPAA2_PACKET_LAYOUT_ALIGN);
+
+	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+			 DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+			 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+			 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+	layout.pass_frame_status = 1;
+	layout.data_head_room = tot_size
+		- DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+	layout.private_data_size = DPAA2_FD_PTA_SIZE;
+	layout.pass_parser_result = 1;
+	PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
+		     tot_size, layout.data_head_room, layout.private_data_size);
+	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+				     DPNI_QUEUE_RX, &layout);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
+		return -1;
+	}
+
+	/* ... tx buffer layout ... */
+	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	layout.pass_frame_status = 1;
+	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+				     DPNI_QUEUE_TX, &layout);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
+			     " layout", ret);
+		return -1;
+	}
+
+	/* ... tx-conf and error buffer layout ... */
+	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	layout.pass_frame_status = 1;
+	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+				     DPNI_QUEUE_TX_CONFIRM, &layout);
+	if (ret) {
+		PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
+			     " layout", ret);
+		return -1;
+	}
+
 	eth_dev->dev_ops = &dpaa2_ethdev_ops;
 	eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
 
@@ -490,6 +714,11 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
 		priv->rx_vq[0] = NULL;
 	}
 
+	/* Free the memory allocated for storing MAC addresses */
+	if (eth_dev->data->mac_addrs) {
+		rte_free(eth_dev->data->mac_addrs);
+		eth_dev->data->mac_addrs = NULL;
+	}
 
 	/* Close the device at the underlying layer */
 	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
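
Note: the promiscuous and MTU ops added by this patch are not called directly; applications reach them through the generic ethdev API. Below is a minimal sketch of how they would be exercised in a DPDK application of this era (port_id 0 and the 1500-byte MTU are arbitrary example values, not part of this patch):

#include <rte_ethdev.h>
#include <rte_log.h>

/* Exercise the new dpaa2 ops via the generic ethdev layer: these calls
 * dispatch through eth_dev_ops to dpaa2_dev_promiscuous_enable(),
 * dpaa2_dev_mtu_set() and dpaa2_dev_promiscuous_disable() respectively.
 */
static void
exercise_dpaa2_ops(uint8_t port_id)
{
	rte_eth_promiscuous_enable(port_id);	/* -> .promiscuous_enable */

	if (rte_eth_dev_set_mtu(port_id, 1500) != 0)	/* -> .mtu_set */
		RTE_LOG(ERR, USER1, "setting MTU failed on port %d\n",
			port_id);

	rte_eth_promiscuous_disable(port_id);	/* -> .promiscuous_disable */
}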