return rc;
}
+int
+cnxk_nix_flow_ops_get(struct rte_eth_dev *eth_dev,
+ const struct rte_flow_ops **ops)
+{
+ RTE_SET_USED(eth_dev);
+
+ *ops = &cnxk_flow_ops;
+ return 0;
+}
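+
+/* Illustrative sketch, not part of this patch: applications never call
+ * cnxk_nix_flow_ops_get() directly; ethdev uses it to locate the PMD's
+ * flow ops when servicing the generic rte_flow API. Assuming a valid,
+ * configured port_id, a drop rule on all ingress traffic would reach
+ * cnxk_flow_ops like so:
+ *
+ *	struct rte_flow_attr attr = { .ingress = 1 };
+ *	struct rte_flow_item pattern[] = {
+ *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
+ *		{ .type = RTE_FLOW_ITEM_TYPE_END },
+ *	};
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *	struct rte_flow_error err;
+ *	struct rte_flow *flow;
+ *
+ *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
+ */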
+
int
cnxk_nix_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)
{
roc_nix_npc_promisc_ena_dis(nix, true);
dev->dmac_filter_enable = true;
eth_dev->data->promiscuous = false;
+ dev->dmac_filter_count++;
return 0;
}
rc = roc_nix_mac_addr_del(nix, index);
if (rc)
plt_err("Failed to delete mac address, rc=%d", rc);
+
+ dev->dmac_filter_count--;
}
int
int rc = -EINVAL;
uint32_t buffsz;
+ frame_size += CNXK_NIX_TIMESYNC_RX_OFFSET * dev->ptp_en;
+
/* Check if MTU is within the allowed range */
if ((frame_size - RTE_ETHER_CRC_LEN) < NIX_MIN_HW_FRS) {
plt_err("MTU is lesser than minimum");
return -ENOTSUP;
}
+
+int
+cnxk_nix_fw_version_get(struct rte_eth_dev *eth_dev, char *fw_version,
+ size_t fw_size)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const char *str = roc_npc_profile_name_get(&dev->npc);
+ uint32_t size = strlen(str) + 1;
+
+ if (fw_size > size)
+ fw_size = size;
+
+ rte_strlcpy(fw_version, str, fw_size);
+
+ if (fw_size < size)
+ return size;
+
+ return 0;
+}
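+
+/* Illustrative sketch, not part of this patch: per the ethdev contract
+ * implemented above, a positive return value from
+ * rte_eth_dev_fw_version_get() is the buffer size actually required, so
+ * a caller (port_id assumed valid) can size its buffer accordingly:
+ *
+ *	char fw[32];
+ *	int len = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));
+ *
+ *	if (len > 0)
+ *		... fw was too small; retry with a buffer of len bytes ...
+ */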
+
+void
+cnxk_nix_rxq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
+ struct rte_eth_rxq_info *qinfo)
+{
+ void *rxq = eth_dev->data->rx_queues[qid];
+ struct cnxk_eth_rxq_sp *rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->mp = rxq_sp->qconf.mp;
+ qinfo->scattered_rx = eth_dev->data->scattered_rx;
+ qinfo->nb_desc = rxq_sp->qconf.nb_desc;
+
+ memcpy(&qinfo->conf, &rxq_sp->qconf.conf.rx, sizeof(qinfo->conf));
+}
+
+void
+cnxk_nix_txq_info_get(struct rte_eth_dev *eth_dev, uint16_t qid,
+ struct rte_eth_txq_info *qinfo)
+{
+ void *txq = eth_dev->data->tx_queues[qid];
+ struct cnxk_eth_txq_sp *txq_sp = cnxk_eth_txq_to_sp(txq);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->nb_desc = txq_sp->qconf.nb_desc;
+
+ memcpy(&qinfo->conf, &txq_sp->qconf.conf.tx, sizeof(qinfo->conf));
+}
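+
+/* Illustrative sketch, not part of this patch: the fields filled in by
+ * the two helpers above are what applications observe through the
+ * generic queue-info queries (port_id and queue 0 assumed valid):
+ *
+ *	struct rte_eth_rxq_info rx_info;
+ *
+ *	if (rte_eth_rx_queue_info_get(port_id, 0, &rx_info) == 0)
+ *		printf("rxq0: %u descs, mempool %s\n",
+ *		       rx_info.nb_desc, rx_info.mp->name);
+ */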
+
+/* This is a no-op on cnxk as the HW frees the buffer on xmit */
+int
+cnxk_nix_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ RTE_SET_USED(txq);
+ RTE_SET_USED(free_cnt);
+
+ return 0;
+}
+
+int
+cnxk_nix_dev_get_reg(struct rte_eth_dev *eth_dev, struct rte_dev_reg_info *regs)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint64_t *data = regs->data;
+ int rc = -ENOTSUP;
+
+ if (data == NULL) {
+ rc = roc_nix_lf_get_reg_count(nix);
+ if (rc > 0) {
+ regs->length = rc;
+ regs->width = 8;
+ rc = 0;
+ }
+ return rc;
+ }
+
+ if (!regs->length ||
+ regs->length == (uint32_t)roc_nix_lf_get_reg_count(nix))
+ return roc_nix_lf_reg_dump(nix, data);
+
+ return rc;
+}
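+
+/* Illustrative sketch, not part of this patch: rte_eth_dev_get_reg_info()
+ * follows the two-step protocol handled above; a first call with
+ * data == NULL only reports length/width, then the caller allocates and
+ * dumps (port_id assumed valid, error handling elided):
+ *
+ *	struct rte_dev_reg_info info = { .data = NULL };
+ *
+ *	if (rte_eth_dev_get_reg_info(port_id, &info) == 0) {
+ *		info.data = malloc(info.length * sizeof(uint64_t));
+ *		if (info.data != NULL)
+ *			rte_eth_dev_get_reg_info(port_id, &info);
+ *	}
+ */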
+
+int
+cnxk_nix_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint16_t reta[ROC_NIX_RSS_RETA_MAX];
+ struct roc_nix *nix = &dev->nix;
+ int i, j, rc = -EINVAL, idx = 0;
+
+ if (reta_size != dev->nix.reta_sz) {
+ plt_err("Size of hash lookup table configured (%d) does not "
+ "match the number hardware can supported (%d)",
+ reta_size, dev->nix.reta_sz);
+ goto fail;
+ }
+
+	/* Fetch the current RETA contents first so that entries not
+	 * selected by the mask below keep their existing value
+	 */
+	rc = roc_nix_rss_reta_get(nix, 0, reta);
+	if (rc)
+		goto fail;
+
+	/* Copy RETA table */
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta[idx] = reta_conf[i].reta[j];
+ idx++;
+ }
+ }
+
+ return roc_nix_rss_reta_set(nix, 0, reta);
+
+fail:
+ return rc;
+}
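+
+/* Illustrative sketch, not part of this patch: the per-group mask decides
+ * which of the RTE_RETA_GROUP_SIZE entries are applied, so a single RETA
+ * entry can be redirected while the rest of the table stays untouched;
+ * e.g. steering entry 5 to queue 2 (port_id and reta_size assumed to
+ * match the device):
+ *
+ *	struct rte_eth_rss_reta_entry64 conf[reta_size / RTE_RETA_GROUP_SIZE];
+ *
+ *	memset(conf, 0, sizeof(conf));
+ *	conf[0].mask = 1ULL << 5;
+ *	conf[0].reta[5] = 2;
+ *	rte_eth_dev_rss_reta_update(port_id, conf, reta_size);
+ */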
+
+int
+cnxk_nix_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ uint16_t reta[ROC_NIX_RSS_RETA_MAX];
+ struct roc_nix *nix = &dev->nix;
+ int rc = -EINVAL, i, j, idx = 0;
+
+ if (reta_size != dev->nix.reta_sz) {
+ plt_err("Size of hash lookup table configured (%d) does not "
+ "match the number hardware can supported (%d)",
+ reta_size, dev->nix.reta_sz);
+ goto fail;
+ }
+
+ rc = roc_nix_rss_reta_get(nix, 0, reta);
+ if (rc)
+ goto fail;
+
+ /* Copy RETA table */
+ for (i = 0; i < (int)(dev->nix.reta_sz / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask >> j) & 0x01)
+ reta_conf[i].reta[j] = reta[idx];
+ idx++;
+ }
+ }
+
+ return 0;
+
+fail:
+ return rc;
+}
+
+int
+cnxk_nix_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ uint8_t rss_hash_level;
+ uint32_t flowkey_cfg;
+ int rc = -EINVAL;
+ uint8_t alg_idx;
+
+ if (rss_conf->rss_key && rss_conf->rss_key_len != ROC_NIX_RSS_KEY_LEN) {
+ plt_err("Hash key size mismatch %d vs %d",
+ rss_conf->rss_key_len, ROC_NIX_RSS_KEY_LEN);
+ goto fail;
+ }
+
+ if (rss_conf->rss_key)
+ roc_nix_rss_key_set(nix, rss_conf->rss_key);
+
+ rss_hash_level = ETH_RSS_LEVEL(rss_conf->rss_hf);
+ if (rss_hash_level)
+ rss_hash_level -= 1;
+ flowkey_cfg =
+ cnxk_rss_ethdev_to_nix(dev, rss_conf->rss_hf, rss_hash_level);
+
+ rc = roc_nix_rss_flowkey_set(nix, &alg_idx, flowkey_cfg,
+ ROC_NIX_RSS_GROUP_DEFAULT,
+ ROC_NIX_RSS_MCAM_IDX_DEFAULT);
+	if (rc)
+		plt_err("Failed to set RSS hash function rc=%d", rc);
+
+fail:
+ return rc;
+}
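+
+/* Illustrative sketch, not part of this patch: ETH_RSS_LEVEL() extracts
+ * the inner/outer selector encoded in rss_hf, which the code above maps
+ * to a NIX flow key level; e.g. hashing on the inner IP header of
+ * tunnelled traffic (port_id assumed valid):
+ *
+ *	struct rte_eth_rss_conf conf = {
+ *		.rss_key = NULL,
+ *		.rss_hf = ETH_RSS_IP | ETH_RSS_LEVEL_INNERMOST,
+ *	};
+ *
+ *	rte_eth_dev_rss_hash_update(port_id, &conf);
+ */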
+
+int
+cnxk_nix_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
+ if (rss_conf->rss_key)
+ roc_nix_rss_key_get(&dev->nix, rss_conf->rss_key);
+
+ rss_conf->rss_key_len = ROC_NIX_RSS_KEY_LEN;
+ rss_conf->rss_hf = dev->ethdev_rss_hf;
+
+ return 0;
+}
+
+int
+cnxk_nix_mc_addr_list_configure(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_ether_addr null_mac_addr;
+ struct roc_nix *nix = &dev->nix;
+ int rc, index;
+ uint32_t i;
+
+ memset(&null_mac_addr, 0, sizeof(null_mac_addr));
+
+ /* All configured multicast filters should be flushed first */
+ for (i = 0; i < dev->max_mac_entries; i++) {
+ if (rte_is_multicast_ether_addr(&data->mac_addrs[i])) {
+ rc = roc_nix_mac_addr_del(nix, i);
+ if (rc) {
+ plt_err("Failed to flush mcast address, rc=%d",
+ rc);
+ return rc;
+ }
+
+ dev->dmac_filter_count--;
+ /* Update address in NIC data structure */
+ rte_ether_addr_copy(&null_mac_addr,
+ &data->mac_addrs[i]);
+ }
+ }
+
+ if (!mc_addr_set || !nb_mc_addr)
+ return 0;
+
+ /* Check for available space */
+ if (nb_mc_addr >
+ ((uint32_t)(dev->max_mac_entries - dev->dmac_filter_count))) {
+ plt_err("No space is available to add multicast filters");
+ return -ENOSPC;
+ }
+
+ /* Multicast addresses are to be installed */
+ for (i = 0; i < nb_mc_addr; i++) {
+ index = roc_nix_mac_addr_add(nix, mc_addr_set[i].addr_bytes);
+ if (index < 0) {
+ plt_err("Failed to add mcast mac address, rc=%d",
+ index);
+ return index;
+ }
+
+ dev->dmac_filter_count++;
+ /* Update address in NIC data structure */
+ rte_ether_addr_copy(&mc_addr_set[i], &data->mac_addrs[index]);
+ }
+
+ roc_nix_npc_promisc_ena_dis(nix, true);
+ dev->dmac_filter_enable = true;
+ eth_dev->data->promiscuous = false;
+
+ return 0;
+}
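+
+/* Illustrative sketch, not part of this patch: as implemented above,
+ * rte_eth_dev_set_mc_addr_list() replaces the whole multicast filter set
+ * on every call, and an empty list simply flushes the previous one
+ * (port_id assumed valid):
+ *
+ *	struct rte_ether_addr mc[] = {
+ *		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
+ *	};
+ *
+ *	rte_eth_dev_set_mc_addr_list(port_id, mc, 1);
+ *	rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);	 flushes all
+ */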