/* Mezz card for Blade Server */
#define HINIC_DEV_ID_MEZZ_25GE 0x0210
-#define HINIC_DEV_ID_MEZZ_40GE 0x020D
#define HINIC_DEV_ID_MEZZ_100GE 0x0205
/* 2*25G and 2*100G card */
#define HINIC_MIN_RX_BUF_SIZE 1024
#define HINIC_MAX_UC_MAC_ADDRS 128
#define HINIC_MAX_MC_MAC_ADDRS 2048
+
+#define HINIC_DEFAULT_BURST_SIZE 32
+#define HINIC_DEFAULT_NB_QUEUES 1
+#define HINIC_DEFAULT_RING_SIZE 1024
+#define HINIC_MAX_LRO_SIZE 65536
+
/*
* vlan_id is a 12 bit number.
* The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
#define HINIC_PKTLEN_TO_MTU(pktlen) \
((pktlen) - (ETH_HLEN + ETH_CRC_LEN))
+/* lro number limit for one packet */
+#define HINIC_LRO_WQE_NUM_DEFAULT 8
+
/* Driver-specific log messages type */
int hinic_logtype;
* specific event.
*
* @param: The address of parameter (struct rte_eth_dev *) regsitered before.
- **/
+ */
static void hinic_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = param;
return -EINVAL;
}
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* mtu size is 256~9600 */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
dev->data->dev_conf.rxmode.max_rx_pkt_len >
nic_dev->rxqs[queue_idx] = rxq;
/* alloc rx sq hw wqepage*/
- rc = hinic_create_rq(hwdev, queue_idx, rq_depth);
+ rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
if (rc) {
PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
queue_idx, dev->data->name, rq_depth);
rxq->q_depth = rq_depth;
rxq->buf_len = (u16)buf_size;
rxq->rx_free_thresh = rx_free_thresh;
+ rxq->socket_id = socket_id;
/* the last point cant do mbuf rearm in bulk */
rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
nic_dev->txqs[queue_idx] = txq;
/* alloc tx sq hw wqepage */
- rc = hinic_create_sq(hwdev, queue_idx, sq_depth);
+ rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
if (rc) {
PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
queue_idx, dev->data->name, sq_depth);
txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
sizeof(struct hinic_sq_bufdesc);
txq->cos = nic_dev->default_cos;
+ txq->socket_id = socket_id;
/* alloc software txinfo */
rc = hinic_setup_tx_resources(txq);
info->max_mac_addrs = HINIC_MAX_UC_MAC_ADDRS;
info->min_mtu = HINIC_MIN_MTU_SIZE;
info->max_mtu = HINIC_MAX_MTU_SIZE;
+ info->max_lro_pkt_size = HINIC_MAX_LRO_SIZE;
hinic_get_speed_capa(dev, &info->speed_capa);
info->rx_queue_offload_capa = 0;
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_RSS_HASH;
info->tx_queue_offload_capa = 0;
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
info->rx_desc_lim = hinic_rx_desc_lim;
info->tx_desc_lim = hinic_tx_desc_lim;
+ /* Driver-preferred Rx/Tx parameters */
+ info->default_rxportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
+ info->default_txportconf.burst_size = HINIC_DEFAULT_BURST_SIZE;
+ info->default_rxportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
+ info->default_txportconf.nb_queues = HINIC_DEFAULT_NB_QUEUES;
+ info->default_rxportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
+ info->default_txportconf.ring_size = HINIC_DEFAULT_RING_SIZE;
+
+ return 0;
+}
+
+/**
+ * DPDK callback to get the firmware version of the device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param fw_version
+ *   Output buffer for the NUL-terminated version string.
+ * @param fw_size
+ *   Size of the fw_version buffer in bytes.
+ *
+ * @return
+ *   0 on success, a positive value equal to the required buffer size
+ *   (including the terminating NUL) when fw_size is too small, or
+ *   -EINVAL when the management firmware query fails.
+ */
+static int hinic_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+				size_t fw_size)
+{
+	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+	char fw_ver[HINIC_MGMT_VERSION_MAX_LEN] = {0};
+	int err;
+
+	err = hinic_get_mgmt_version(nic_dev->hwdev, fw_ver);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to get fw version\n");
+		return -EINVAL;
+	}
+
+	/* Report the size the caller must provide if the buffer is short. */
+	if (fw_size < strlen(fw_ver) + 1)
+		return (strlen(fw_ver) + 1);
+
+	snprintf(fw_version, fw_size, "%s", fw_ver);
+
	return 0;
}
return 0;
}
-
static int hinic_rxtx_configure(struct rte_eth_dev *dev)
{
- int err;
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
+ int err;
/* rx configure, if rss enable, need to init default configuration */
err = hinic_rx_configure(dev);
nic_dev = rxq->nic_dev;
/* free rxq_pkt mbuf */
- hinic_free_all_rx_skbs(rxq);
+ hinic_free_all_rx_mbufs(rxq);
/* free rxq_cqe, rxq_info */
hinic_free_rx_resources(rxq);
nic_dev = txq->nic_dev;
/* free txq_pkt mbuf */
- hinic_free_all_tx_skbs(txq);
+ hinic_free_all_tx_mbufs(txq);
/* free txq_info */
hinic_free_tx_resources(txq);
/* clean root context */
hinic_free_qp_ctxts(nic_dev->hwdev);
- hinic_free_fdir_filter(nic_dev);
+ hinic_destroy_fdir_filter(dev);
/* free mbuf */
hinic_free_all_rx_mbuf(dev);
return count;
}
+/**
+ * DPDK callback to report Rx queue information.
+ *
+ * Only the mempool and descriptor count are filled in; the remaining
+ * qinfo fields are left as provided by the ethdev layer.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Rx queue index.
+ * @param qinfo
+ *   Pointer to the queue information structure to fill.
+ */
+static void hinic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+			       struct rte_eth_rxq_info *qinfo)
+{
+	struct hinic_rxq *rxq = dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->nb_desc = rxq->q_depth;
+}
+
+/**
+ * DPDK callback to report Tx queue information.
+ *
+ * Only the descriptor count is filled in; the remaining qinfo fields
+ * are left as provided by the ethdev layer.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param queue_id
+ *   Tx queue index.
+ * @param qinfo
+ *   Pointer to the queue information structure to fill.
+ */
+static void hinic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+			       struct rte_eth_txq_info *qinfo)
+{
+	struct hinic_txq *txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->q_depth;
+}
+
/**
* DPDK callback to retrieve names of extended device statistics
*
if (err)
return err;
- /* disable LRO */
- err = hinic_set_rx_lro(nic_dev->hwdev, 0, 0, (u8)0);
- if (err)
- return err;
-
/* Set pause enable, and up will disable pfc. */
err = hinic_set_default_pause_feature(nic_dev);
if (err)
static const struct eth_dev_ops hinic_pmd_ops = {
.dev_configure = hinic_dev_configure,
.dev_infos_get = hinic_dev_infos_get,
+ .fw_version_get = hinic_fw_version_get,
.rx_queue_setup = hinic_rx_queue_setup,
.tx_queue_setup = hinic_tx_queue_setup,
.dev_start = hinic_dev_start,
.xstats_get = hinic_dev_xstats_get,
.xstats_reset = hinic_dev_xstats_reset,
.xstats_get_names = hinic_dev_xstats_get_names,
+ .rxq_info_get = hinic_rxq_info_get,
+ .txq_info_get = hinic_txq_info_get,
.mac_addr_set = hinic_set_mac_addr,
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
static const struct eth_dev_ops hinic_pmd_vf_ops = {
.dev_configure = hinic_dev_configure,
.dev_infos_get = hinic_dev_infos_get,
+ .fw_version_get = hinic_fw_version_get,
.rx_queue_setup = hinic_rx_queue_setup,
.tx_queue_setup = hinic_tx_queue_setup,
.dev_start = hinic_dev_start,
.xstats_get = hinic_dev_xstats_get,
.xstats_reset = hinic_dev_xstats_reset,
.xstats_get_names = hinic_dev_xstats_get_names,
+ .rxq_info_get = hinic_rxq_info_get,
+ .txq_info_get = hinic_txq_info_get,
.mac_addr_set = hinic_set_mac_addr,
.mac_addr_remove = hinic_mac_addr_remove,
.mac_addr_add = hinic_mac_addr_add,
struct rte_ether_addr *eth_addr;
struct hinic_nic_dev *nic_dev;
struct hinic_filter_info *filter_info;
+ struct hinic_tcam_info *tcam_info;
u32 mac_size;
int rc;
/* EAL is SECONDARY and eth_dev is already created */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- rc = rte_intr_callback_register(&pci_dev->intr_handle,
- hinic_dev_interrupt_handler,
- (void *)eth_dev);
- if (rc)
- PMD_DRV_LOG(ERR, "Initialize %s failed in secondary process",
- eth_dev->data->name);
+ PMD_DRV_LOG(INFO, "Initialize %s in secondary process",
+ eth_dev->data->name);
- return rc;
+ return 0;
}
nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(eth_dev);
/* initialize filter info */
filter_info = &nic_dev->filter;
+ tcam_info = &nic_dev->tcam;
memset(filter_info, 0, sizeof(struct hinic_filter_info));
+ memset(tcam_info, 0, sizeof(struct hinic_tcam_info));
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
+ TAILQ_INIT(&tcam_info->tcam_list);
TAILQ_INIT(&nic_dev->filter_ntuple_list);
TAILQ_INIT(&nic_dev->filter_ethertype_list);
TAILQ_INIT(&nic_dev->filter_fdir_rule_list);
static struct rte_pci_id pci_id_hinic_map[] = {
{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_PRD) },
{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_25GE) },
- { RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_40GE) },
{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_MEZZ_100GE) },
{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF) },
{ RTE_PCI_DEVICE(HINIC_HUAWEI_VENDOR_ID, HINIC_DEV_ID_VF_HV) },