uint64_t initial;
uint64_t table[1 << 8];
};
-extern struct fman_crc64_t FMAN_CRC64_ECMA_182;
+extern struct fman_crc64_t fman_crc64_ecma_182;
#define DECLARE_FMAN_CRC64_TABLE() \
-struct fman_crc64_t FMAN_CRC64_ECMA_182 = { \
+struct fman_crc64_t fman_crc64_ecma_182 = { \
0xFFFFFFFFFFFFFFFFULL, \
{ \
0x0000000000000000ULL, 0xb32e4cbe03a75f6fULL, \
*/
static inline uint64_t fman_crc64_init(void)
{
- return FMAN_CRC64_ECMA_182.initial;
+ return fman_crc64_ecma_182.initial;
}
/* Updates the CRC with arbitrary data */
static inline uint64_t fman_crc64_update(uint64_t crc, void *data, size_t len)
{
uint8_t *p = data;
while (len--)
- crc = FMAN_CRC64_ECMA_182.table[(crc ^ *(p++)) & 0xff] ^
+ crc = fman_crc64_ecma_182.table[(crc ^ *(p++)) & 0xff] ^
(crc >> 8);
return crc;
}
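For context, a minimal usage sketch of the helpers in this hunk. fman_crc64_finish() is assumed from the same header (it complements the running value, per the CRC-64/ECMA-182 convention) and is not part of this diff:

/* Sketch: CRC-64 (ECMA-182) over a buffer, using the renamed table.
 * DECLARE_FMAN_CRC64_TABLE() must appear in exactly one compilation
 * unit to instantiate fman_crc64_ecma_182.
 */
DECLARE_FMAN_CRC64_TABLE();

static uint64_t checksum_buffer(void *buf, size_t len)
{
	uint64_t crc = fman_crc64_init();       /* all-ones seed */

	crc = fman_crc64_update(crc, buf, len); /* byte-wise table lookup */
	return fman_crc64_finish(crc);          /* assumed: returns ~crc */
}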
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
switch (mode) {
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- roundrobin_scheduler) < 0) {
+ crypto_scheduler_roundrobin) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- pkt_size_based_distr_scheduler) < 0) {
+ crypto_scheduler_pkt_size_based_distr) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- failover_scheduler) < 0) {
+ crypto_scheduler_failover) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- multicore_scheduler) < 0) {
+ crypto_scheduler_multicore) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
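The switch above backs the public mode setter; a minimal caller sketch (the scheduler port id is hypothetical):

/* Sketch: selecting round-robin on a scheduler PMD instance whose
 * port id is scheduler_id (0 here, purely for illustration).
 */
uint8_t scheduler_id = 0;

if (rte_cryptodev_scheduler_mode_set(scheduler_id,
		CDEV_SCHED_MODE_ROUNDROBIN) < 0)
	rte_exit(EXIT_FAILURE, "cannot set scheduler mode\n");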
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
return -EINVAL;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
};
/** Round-robin mode scheduler */
-extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin;
/** Packet-size based distribution mode scheduler */
-extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr;
/** Fail-over mode scheduler */
-extern struct rte_cryptodev_scheduler *failover_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_failover;
/** multi-core mode scheduler */
-extern struct rte_cryptodev_scheduler *multicore_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_multicore;
#ifdef __cplusplus
}
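The prefixed handles remain part of the exported header, so loading a built-in scheduler directly still works; a sketch, assuming scheduler_id names a valid scheduler port:

/* Sketch: loading the built-in fail-over scheduler through the
 * public API with the renamed handle.
 */
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
		crypto_scheduler_failover) < 0)
	return -1;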
.ops = &scheduler_fo_ops
};
-struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;
.ops = &scheduler_mc_ops
};
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
.ops = &scheduler_ps_ops
};
-struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr = &psd_scheduler;
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
-uint8_t cryptodev_driver_id;
+uint8_t cryptodev_scheduler_driver_id;
struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
return -EFAULT;
}
- dev->driver_id = cryptodev_driver_id;
+ dev->driver_id = cryptodev_scheduler_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
sched_ctx = dev->data->dev_private;
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
cryptodev_scheduler_pmd_drv.driver,
- cryptodev_driver_id);
+ cryptodev_scheduler_driver_id);
} __rte_cache_aligned;
-extern uint8_t cryptodev_driver_id;
+extern uint8_t cryptodev_scheduler_driver_id;
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
.ops = &scheduler_rr_ops
};
-struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
.addr_bytes = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x02 }
};
-struct port mode_8023ad_ports[RTE_MAX_ETHPORTS];
+struct port bond_mode_8023ad_ports[RTE_MAX_ETHPORTS];
static void
timer_cancel(uint64_t *timer)
static void
show_warnings(uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
uint8_t warnings;
do {
rx_machine(struct bond_dev_private *internals, uint16_t slave_id,
struct lacpdu *lacp)
{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
uint64_t timeout;
if (SM_FLAG(port, BEGIN)) {
ACTOR_STATE_CLR(port, DEFAULTED);
/* If LACP partner params match this port actor params */
- agg = &mode_8023ad_ports[port->aggregator_port_id];
+ agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
bool match = port->actor.system_priority ==
lacp->partner.port_params.system_priority &&
is_same_ether_addr(&agg->actor.system,
static void
periodic_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
/* Calculate if either side is LACP enabled */
uint64_t timeout;
uint8_t active = ACTOR_STATE(port, LACP_ACTIVE) ||
static void
mux_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
/* Save current state for later use */
const uint8_t state_mask = STATE_SYNCHRONIZATION | STATE_DISTRIBUTING |
static void
tx_machine(struct bond_dev_private *internals, uint16_t slave_id)
{
- struct port *agg, *port = &mode_8023ad_ports[slave_id];
+ struct port *agg, *port = &bond_mode_8023ad_ports[slave_id];
struct rte_mbuf *lacp_pkt = NULL;
struct lacpdu_header *hdr;
lacpdu->actor.info_length = sizeof(struct lacpdu_actor_partner_params);
memcpy(&hdr->lacpdu.actor.port_params, &port->actor,
sizeof(port->actor));
- agg = &mode_8023ad_ports[port->aggregator_port_id];
+ agg = &bond_mode_8023ad_ports[port->aggregator_port_id];
ether_addr_copy(&agg->actor.system, &hdr->lacpdu.actor.port_params.system);
lacpdu->actor.state = port->actor_state;
slaves = internals->active_slaves;
slaves_count = internals->active_slave_count;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
/* Search for aggregator suitable for this port */
for (i = 0; i < slaves_count; ++i) {
- agg = &mode_8023ad_ports[slaves[i]];
+ agg = &bond_mode_8023ad_ports[slaves[i]];
/* Skip ports that are not aggregators */
if (agg->aggregator_port_id != slaves[i])
continue;
} else
key = 0;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
key = rte_cpu_to_be_16(key);
if (key != port->actor.key) {
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if ((port->actor.key &
rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) {
{
struct bond_dev_private *internals = bond_dev->data->dev_private;
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
struct port_params initial = {
.system = { { 0 } },
.system_priority = rte_cpu_to_be_16(0xFFFF),
struct port *port = NULL;
uint8_t old_partner_state;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
ACTOR_STATE_CLR(port, AGGREGATION);
port->selected = UNSELECTED;
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- slave = &mode_8023ad_ports[slave_id];
+ slave = &bond_mode_8023ad_ports[slave_id];
rte_eth_macaddr_get(slave_id, &slave_addr);
if (is_same_ether_addr(&slave_addr, &slave->actor.system))
continue;
for (j = 0; j < internals->active_slave_count; j++) {
- agg_slave = &mode_8023ad_ports[internals->active_slaves[j]];
+ agg_slave = &bond_mode_8023ad_ports[internals->active_slaves[j]];
if (agg_slave->aggregator_port_id == slave_id)
SM_FLAG_SET(agg_slave, NTT);
}
uint16_t slave_id, struct rte_mbuf *pkt)
{
struct mode8023ad_private *mode4 = &internals->mode4;
- struct port *port = &mode_8023ad_ports[slave_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_id];
struct marker_header *m_hdr;
uint64_t marker_timer, old_marker_timer;
int retval;
internals->active_slave_count)
return -EINVAL;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
info->selected = port->selected;
info->actor_state = port->actor_state;
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (enabled)
ACTOR_STATE_SET(port, COLLECTING);
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (enabled)
ACTOR_STATE_SET(port, DISTRIBUTING);
if (err != 0)
return err;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
return ACTOR_STATE(port, DISTRIBUTING);
}
if (err != 0)
return err;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
return ACTOR_STATE(port, COLLECTING);
}
if (res != 0)
return res;
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header))
return -EINVAL;
for (i = 0; i < internals->active_slave_count; i++) {
slave_id = internals->active_slaves[i];
- port = &mode_8023ad_ports[slave_id];
+ port = &bond_mode_8023ad_ports[slave_id];
if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) {
struct rte_mbuf *lacp_pkt = pkt;
* The pool of *port* structures. The size of the pool
* is configured at compile-time in the <rte_eth_bond_8023ad.c> file.
*/
-extern struct port mode_8023ad_ports[];
+extern struct port bond_mode_8023ad_ports[];
/* Forward declaration */
struct bond_dev_private;
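The pool itself stays internal to the PMD; applications read the per-slave mode-4 state it backs through the public API. A sketch with hypothetical port ids:

/* Sketch: querying the state kept in bond_mode_8023ad_ports[] via
 * the exported 802.3ad API (bond_port_id/slave_id are hypothetical).
 */
struct rte_eth_bond_8023ad_slave_info info;

if (rte_eth_bond_8023ad_slave_info(bond_port_id, slave_id, &info) == 0)
	printf("slave %u actor state: 0x%x\n", slave_id, info.actor_state);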
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
}
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
COLLECTING);
/* Read packets from this slave */
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
if (ACTOR_STATE(port, DISTRIBUTING))
dist_slave_port_ids[dist_slave_count++] =
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
- struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
struct rte_mbuf *ctrl_pkt = NULL;
if (likely(rte_ring_empty(port->tx_ring)))
int errval = 0;
struct bond_dev_private *internals = (struct bond_dev_private *)
bonded_eth_dev->data->dev_private;
- struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+ struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
if (port->slow_pool == NULL) {
char mem_name[256];
/* Discard all messages to/from mode 4 state machines */
for (i = 0; i < internals->active_slave_count; i++) {
- port = &mode_8023ad_ports[internals->active_slaves[i]];
+ port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
RTE_ASSERT(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
return -EINVAL;
if (PRIV(dev)->pending_alarm)
return 0;
- ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ ret = rte_eal_alarm_set(failsafe_hotplug_poll * 1000,
fs_hotplug_alarm,
dev);
if (ret) {
goto unregister_new_callback;
}
mac = &dev->data->mac_addrs[0];
- if (mac_from_arg) {
+ if (failsafe_mac_from_arg) {
/*
* If MAC address was provided as a parameter,
* apply to all probed slaves.
typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
uint8_t head);
-uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
-int mac_from_arg = 0;
+uint64_t failsafe_hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int failsafe_mac_from_arg;
static const char * const pmd_failsafe_init_parameters[] = {
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
if (arg_count == 1) {
ret = rte_kvargs_process(kvlist,
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
- &fs_get_u64_arg, &hotplug_poll);
+ &fs_get_u64_arg, &failsafe_hotplug_poll);
if (ret < 0)
goto free_kvlist;
}
if (ret < 0)
goto free_kvlist;
- mac_from_arg = 1;
+ failsafe_mac_from_arg = 1;
}
}
PRIV(dev)->state = DEV_PARSED;
extern const char pmd_failsafe_driver_name[];
extern const struct eth_dev_ops failsafe_ops;
extern const struct rte_flow_ops fs_flow_ops;
-extern uint64_t hotplug_poll;
-extern int mac_from_arg;
+extern uint64_t failsafe_hotplug_poll;
+extern int failsafe_mac_from_arg;
/* HELPERS */
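Both prefixed globals are still fed from the same devargs; a hypothetical probe for illustration (assumes rte_bus_vdev.h):

/* Sketch: "hotplug_poll" populates failsafe_hotplug_poll and "mac"
 * raises failsafe_mac_from_arg. Device names and values are
 * hypothetical.
 */
rte_vdev_init("net_failsafe0",
	      "dev(net_null0),hotplug_poll=2000,mac=de:ad:be:ef:01:02");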
union event_ring_data *data,
u8 fw_return_code);
-const char *ecore_channel_tlvs_string[] = {
+const char *qede_ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
"CHANNEL_TLV_VPORT_START",
ECORE_MSG_IOV,
"VF[%d]: vf pf channel locked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[tlv]);
+ qede_ecore_channel_tlvs_string[tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel unlocked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[expected_tlv]);
+ qede_ecore_channel_tlvs_string[expected_tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
if (ecore_iov_tlv_supported(tlv->type))
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV number %d: type %s, length %d\n",
- i, ecore_channel_tlvs_string[tlv->type],
+ i, qede_ecore_channel_tlvs_string[tlv->type],
tlv->length);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
if (p_tlv->type == req_type) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Extended tlv type %s, length %d found\n",
- ecore_channel_tlvs_string[p_tlv->type],
+ qede_ecore_channel_tlvs_string[p_tlv->type],
p_tlv->length);
return p_tlv;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"preparing to send %s tlv over vf pf channel\n",
- ecore_channel_tlvs_string[type]);
+ qede_ecore_channel_tlvs_string[type]);
/* Reset Request offset */
p_iov->offset = (u8 *)(p_iov->vf2pf_request);
return !!p_data->sge_tpa_params;
default:
DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
- tlv, ecore_channel_tlvs_string[tlv]);
+ tlv, qede_ecore_channel_tlvs_string[tlv]);
return false;
}
}
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV[%d] type %s Configuration %s\n",
- tlv, ecore_channel_tlvs_string[tlv],
+ tlv, qede_ecore_channel_tlvs_string[tlv],
(p_resp && p_resp->hdr.status) ? "succeeded"
: "failed");
}
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
-extern const char *ecore_channel_tlvs_string[];
+extern const char *qede_ecore_channel_tlvs_string[];
#endif /* __ECORE_VF_PF_IF_H__ */
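A hypothetical helper, sketching the table's one use: mapping a channel TLV type to a printable name, as in the DP_VERBOSE() calls above:

/* Sketch only: no bounds check, since callers index with values
 * already validated by ecore_iov_tlv_supported().
 */
static const char *qede_tlv_to_str(u16 tlv_type)
{
	return qede_ecore_channel_tlvs_string[tlv_type];
}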
(info->mfw_rev >> 16) & 0xff,
(info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff);
DP_INFO(edev, " Management Firmware version : %s\n", ver_str);
- DP_INFO(edev, " Firmware file : %s\n", fw_file);
+ DP_INFO(edev, " Firmware file : %s\n", qede_fw_file);
DP_INFO(edev, "*********************************\n");
}
-extern char fw_file[];
+extern char qede_fw_file[];
/* Number of PF connections - 32 RX + 32 TX */
#define QEDE_PF_NUM_CONNS (64)
#define QEDE_ALARM_TIMEOUT_US 100000
/* Global variable to hold absolute path of fw file */
-char fw_file[PATH_MAX];
+char qede_fw_file[PATH_MAX];
static const char * const QEDE_DEFAULT_FIRMWARE =
"/lib/firmware/qed/qed_init_values-8.37.7.0.bin";
const char *fw = RTE_LIBRTE_QEDE_FW;
if (strcmp(fw, "") == 0)
- strcpy(fw_file, QEDE_DEFAULT_FIRMWARE);
+ strcpy(qede_fw_file, QEDE_DEFAULT_FIRMWARE);
else
- strcpy(fw_file, fw);
+ strcpy(qede_fw_file, fw);
- fd = open(fw_file, O_RDONLY);
+ fd = open(qede_fw_file, O_RDONLY);
if (fd < 0) {
DP_ERR(edev, "Can't open firmware file\n");
return -ENOENT;
#ifdef CONFIG_ECORE_BINARY_FW
rc = qed_load_firmware_data(edev);
if (rc) {
- DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
+ DP_ERR(edev, "Failed to find fw file %s\n",
+ qede_fw_file);
goto err;
}
#endif
int enable);
};
-struct virtio_user_backend_ops ops_user;
-struct virtio_user_backend_ops ops_kernel;
+extern struct virtio_user_backend_ops virtio_ops_user;
+extern struct virtio_user_backend_ops virtio_ops_kernel;
#endif
return 0;
}
-struct virtio_user_backend_ops ops_kernel = {
+struct virtio_user_backend_ops virtio_ops_kernel = {
.setup = vhost_kernel_setup,
.send_request = vhost_kernel_ioctl,
.enable_qp = vhost_kernel_enable_queue_pair
return 0;
}
-struct virtio_user_backend_ops ops_user = {
+struct virtio_user_backend_ops virtio_ops_user = {
.setup = vhost_user_setup,
.send_request = vhost_user_sock,
.enable_qp = vhost_user_enable_queue_pair
PMD_DRV_LOG(ERR, "Server mode doesn't support vhost-kernel!");
return -1;
}
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
if (is_vhost_user_by_type(dev->path)) {
- dev->ops = &ops_user;
+ dev->ops = &virtio_ops_user;
} else {
- dev->ops = &ops_kernel;
+ dev->ops = &virtio_ops_kernel;
dev->vhostfds = malloc(dev->max_queue_pairs *
sizeof(int));
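The branch above keys off the device path; two hypothetical probes showing which renamed ops set each path resolves to:

/* Sketch (devargs hypothetical): a unix socket path resolves to
 * virtio_ops_user ...
 */
rte_vdev_init("virtio_user0", "path=/tmp/vhost-user0.sock,queues=1");
/* ... while the vhost-net character device resolves to
 * virtio_ops_kernel.
 */
rte_vdev_init("virtio_user1", "path=/dev/vhost-net,queues=1,iface=tap0");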
.resource_size = sizeof(struct feature_port_header),
.feature_index = PORT_FEATURE_ID_HEADER,
.revision_id = PORT_HEADER_REVISION,
- .ops = &port_hdr_ops,
+ .ops = &ifpga_rawdev_port_hdr_ops,
},
{
.name = PORT_FEATURE_ERR,
.resource_size = sizeof(struct feature_port_error),
.feature_index = PORT_FEATURE_ID_ERROR,
.revision_id = PORT_ERR_REVISION,
- .ops = &port_error_ops,
+ .ops = &ifpga_rawdev_port_error_ops,
},
{
.name = PORT_FEATURE_UMSG,
.resource_size = sizeof(struct feature_port_uint),
.feature_index = PORT_FEATURE_ID_UINT,
.revision_id = PORT_UINT_REVISION,
- .ops = &port_uint_ops,
+ .ops = &ifpga_rawdev_port_uint_ops,
},
{
.name = PORT_FEATURE_STP,
.resource_size = PORT_FEATURE_STP_REGION_SIZE,
.feature_index = PORT_FEATURE_ID_STP,
.revision_id = PORT_STP_REVISION,
- .ops = &port_stp_ops,
+ .ops = &ifpga_rawdev_port_stp_ops,
},
{
.name = PORT_FEATURE_UAFU,
int port_set_irq(struct ifpga_port_hw *port, u32 feature_id, void *irq_set);
-extern struct feature_ops port_hdr_ops;
-extern struct feature_ops port_error_ops;
-extern struct feature_ops port_stp_ops;
-extern struct feature_ops port_uint_ops;
+extern struct feature_ops ifpga_rawdev_port_hdr_ops;
+extern struct feature_ops ifpga_rawdev_port_error_ops;
+extern struct feature_ops ifpga_rawdev_port_stp_ops;
+extern struct feature_ops ifpga_rawdev_port_uint_ops;
/* help functions for feature ops */
int fpga_msix_set_block(struct feature *feature, unsigned int start,
return -ENOENT;
}
-struct feature_ops port_hdr_ops = {
+struct feature_ops ifpga_rawdev_port_hdr_ops = {
.init = port_hdr_init,
.uinit = port_hdr_uinit,
.get_prop = port_hdr_get_prop,
dev_info(NULL, "port stp uinit.\n");
}
-struct feature_ops port_stp_ops = {
+struct feature_ops ifpga_rawdev_port_stp_ops = {
.init = port_stp_init,
.uinit = port_stp_uinit,
};
dev_info(NULL, "PORT UINT UInit.\n");
}
-struct feature_ops port_uint_ops = {
+struct feature_ops ifpga_rawdev_port_uint_ops = {
.init = port_uint_init,
.uinit = port_uint_uinit,
};
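For reference, a hypothetical dispatch sketch; the real walk over these tables lives in the ifpga base enumeration code and is unchanged by the rename:

/* Sketch: each discovered feature carries a pointer to one of the
 * renamed ops tables and is driven through it.
 */
static int feature_init_sketch(struct feature *feature)
{
	if (feature->ops && feature->ops->init)
		return feature->ops->init(feature);
	return 0;
}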
return -ENOENT;
}
-struct feature_ops port_error_ops = {
+struct feature_ops ifpga_rawdev_port_error_ops = {
.init = port_error_init,
.uinit = port_error_uinit,
.get_prop = port_error_get_prop,