From: Jasvinder Singh
Date: Fri, 6 Jul 2018 17:20:54 +0000 (+0100)
Subject: net/softnic: restructure
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=7e68bc20f8c8e8cb411a5ae15b5edfb7b6bbb2c6;p=dpdk.git

net/softnic: restructure

Rework the softnic implementation for the flexibility to enable more
features on its receive and transmit data paths.

Signed-off-by: Cristian Dumitrescu
Signed-off-by: Jasvinder Singh
---
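Note: after this rework the PMD accepts just two devargs, "firmware" and
"cpu_id" (see RTE_PMD_REGISTER_PARAM_STRING at the end of
rte_eth_softnic.c); both are optional, defaulting to SOFTNIC_FIRMWARE
("firmware.cli") and SOFTNIC_CPU_ID (0). A minimal usage sketch, where
the application name and script path are illustrative only, not taken
from this patch:

    ./testpmd --vdev 'net_softnic0,firmware=firmware.cli,cpu_id=0' ...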
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 65166c1adf..ec3d91edbc 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -13,40 +13,17 @@
 #include
 #include
 #include
-#include
 #include

 #include "rte_eth_softnic.h"
 #include "rte_eth_softnic_internals.h"

-#define DEV_HARD(p)				\
-	(&rte_eth_devices[p->hard.port_id])
-
-#define PMD_PARAM_SOFT_TM		"soft_tm"
-#define PMD_PARAM_SOFT_TM_RATE		"soft_tm_rate"
-#define PMD_PARAM_SOFT_TM_NB_QUEUES	"soft_tm_nb_queues"
-#define PMD_PARAM_SOFT_TM_QSIZE0	"soft_tm_qsize0"
-#define PMD_PARAM_SOFT_TM_QSIZE1	"soft_tm_qsize1"
-#define PMD_PARAM_SOFT_TM_QSIZE2	"soft_tm_qsize2"
-#define PMD_PARAM_SOFT_TM_QSIZE3	"soft_tm_qsize3"
-#define PMD_PARAM_SOFT_TM_ENQ_BSZ	"soft_tm_enq_bsz"
-#define PMD_PARAM_SOFT_TM_DEQ_BSZ	"soft_tm_deq_bsz"
-
-#define PMD_PARAM_HARD_NAME		"hard_name"
-#define PMD_PARAM_HARD_TX_QUEUE_ID	"hard_tx_queue_id"
+#define PMD_PARAM_FIRMWARE		"firmware"
+#define PMD_PARAM_CPU_ID		"cpu_id"

 static const char *pmd_valid_args[] = {
-	PMD_PARAM_SOFT_TM,
-	PMD_PARAM_SOFT_TM_RATE,
-	PMD_PARAM_SOFT_TM_NB_QUEUES,
-	PMD_PARAM_SOFT_TM_QSIZE0,
-	PMD_PARAM_SOFT_TM_QSIZE1,
-	PMD_PARAM_SOFT_TM_QSIZE2,
-	PMD_PARAM_SOFT_TM_QSIZE3,
-	PMD_PARAM_SOFT_TM_ENQ_BSZ,
-	PMD_PARAM_SOFT_TM_DEQ_BSZ,
-	PMD_PARAM_HARD_NAME,
-	PMD_PARAM_HARD_TX_QUEUE_ID,
+	PMD_PARAM_FIRMWARE,
+	PMD_PARAM_CPU_ID,
 	NULL
 };

@@ -82,50 +59,35 @@ pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
 }

 static int
-pmd_dev_configure(struct rte_eth_dev *dev)
+pmd_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
-	struct pmd_internals *p = dev->data->dev_private;
-	struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
-	if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
-		return -1;
-
-	if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
-		return -1;
-
 	return 0;
 }

 static int
 pmd_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t rx_queue_id,
-	uint16_t nb_rx_desc __rte_unused,
+	uint16_t nb_rx_desc,
 	unsigned int socket_id,
 	const struct rte_eth_rxconf *rx_conf __rte_unused,
 	struct rte_mempool *mb_pool __rte_unused)
 {
-	struct pmd_internals *p = dev->data->dev_private;
-
-	if (p->params.soft.intrusive == 0) {
-		struct pmd_rx_queue *rxq;
-
-		rxq = rte_zmalloc_socket(p->params.soft.name,
-			sizeof(struct pmd_rx_queue), 0, socket_id);
-		if (rxq == NULL)
-			return -ENOMEM;
+	uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_rxq") + 4;
+	char name[size];
+	struct rte_ring *r;

-		rxq->hard.port_id = p->hard.port_id;
-		rxq->hard.rx_queue_id = rx_queue_id;
-		dev->data->rx_queues[rx_queue_id] = rxq;
-	} else {
-		struct rte_eth_dev *hard_dev = DEV_HARD(p);
-		void *rxq = hard_dev->data->rx_queues[rx_queue_id];
+	snprintf(name, sizeof(name), "%s_rxq%04x",
+		dev->data->name,
+		rx_queue_id);

-		if (rxq == NULL)
-			return -1;
+	r = rte_ring_create(name,
+		nb_rx_desc,
+		socket_id,
+		RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (r == NULL)
+		return -1;

-		dev->data->rx_queues[rx_queue_id] = rxq;
-	}
+	dev->data->rx_queues[rx_queue_id] = r;
 	return 0;
 }

@@ -141,8 +103,12 @@ pmd_tx_queue_setup(struct rte_eth_dev *dev,
 	struct rte_ring *r;

 	snprintf(name, sizeof(name), "%s_txq%04x",
-		dev->data->name, tx_queue_id);
-	r = rte_ring_create(name, nb_tx_desc, socket_id,
+		dev->data->name,
+		tx_queue_id);
+
+	r = rte_ring_create(name,
+		nb_tx_desc,
+		socket_id,
 		RING_F_SP_ENQ | RING_F_SC_DEQ);
 	if (r == NULL)
 		return -1;
@@ -154,36 +120,15 @@
 static int
 pmd_dev_start(struct rte_eth_dev *dev)
 {
-	struct pmd_internals *p = dev->data->dev_private;
-
-	if (tm_used(dev)) {
-		int status = tm_start(p);
-
-		if (status)
-			return status;
-	}
-
 	dev->data->dev_link.link_status = ETH_LINK_UP;

-	if (p->params.soft.intrusive) {
-		struct rte_eth_dev *hard_dev = DEV_HARD(p);
-
-		/* The hard_dev->rx_pkt_burst should be stable by now */
-		dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
-	}
-
 	return 0;
 }

 static void
 pmd_dev_stop(struct rte_eth_dev *dev)
 {
-	struct pmd_internals *p = dev->data->dev_private;
-
 	dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
-	if (tm_used(dev))
-		tm_stop(p);
 }

 static void
@@ -191,6 +136,10 @@ pmd_dev_close(struct rte_eth_dev *dev)
 {
 	uint32_t i;

+	/* RX queues */
+	for (i = 0; i < dev->data->nb_rx_queues; i++)
+		rte_ring_free((struct rte_ring *)dev->data->rx_queues[i]);
+
 	/* TX queues */
 	for (i = 0; i < dev->data->nb_tx_queues; i++)
 		rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
@@ -204,10 +153,9 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
 }

 static int
-pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
+pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
 {
-	*(const struct rte_tm_ops **)arg =
-		(tm_enabled(dev)) ? &pmd_tm_ops : NULL;
+	*(const struct rte_tm_ops **)arg = NULL;

 	return 0;
 }
@@ -229,12 +177,10 @@ pmd_rx_pkt_burst(void *rxq,
 	struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts)
 {
-	struct pmd_rx_queue *rx_queue = rxq;
-
-	return rte_eth_rx_burst(rx_queue->hard.port_id,
-		rx_queue->hard.rx_queue_id,
-		rx_pkts,
-		nb_pkts);
+	return (uint16_t)rte_ring_sc_dequeue_burst(rxq,
+		(void **)rx_pkts,
+		nb_pkts,
+		NULL);
 }

 static uint16_t
@@ -242,243 +188,31 @@ pmd_tx_pkt_burst(void *txq,
 	struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
 {
-	return (uint16_t)rte_ring_enqueue_burst(txq,
+	return (uint16_t)rte_ring_sp_enqueue_burst(txq,
 		(void **)tx_pkts,
 		nb_pkts,
 		NULL);
 }

-static __rte_always_inline int
-run_default(struct rte_eth_dev *dev)
-{
-	struct pmd_internals *p = dev->data->dev_private;
-
-	/* Persistent context: Read Only (update not required) */
-	struct rte_mbuf **pkts = p->soft.def.pkts;
-	uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
-	/* Persistent context: Read - Write (update required) */
-	uint32_t txq_pos = p->soft.def.txq_pos;
-	uint32_t pkts_len = p->soft.def.pkts_len;
-	uint32_t flush_count = p->soft.def.flush_count;
-
-	/* Not part of the persistent context */
-	uint32_t pos;
-	uint16_t i;
-
-	/* Soft device TXQ read, Hard device TXQ write */
-	for (i = 0; i < nb_tx_queues; i++) {
-		struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
-		/* Read soft device TXQ burst to packet enqueue buffer */
-		pkts_len += rte_ring_sc_dequeue_burst(txq,
-			(void **)&pkts[pkts_len],
-			DEFAULT_BURST_SIZE,
-			NULL);
-
-		/* Increment soft device TXQ */
-		txq_pos++;
-		if (txq_pos >= nb_tx_queues)
-			txq_pos = 0;
-
-		/* Hard device TXQ write when complete burst is available */
-		if (pkts_len >= DEFAULT_BURST_SIZE) {
-			for (pos = 0; pos < pkts_len; )
-				pos += rte_eth_tx_burst(p->hard.port_id,
-					p->params.hard.tx_queue_id,
-					&pkts[pos],
-					(uint16_t)(pkts_len - pos));
-
-			pkts_len = 0;
-			flush_count = 0;
-			break;
-		}
-	}
-
-	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
-		for (pos = 0; pos < pkts_len; )
-			pos += rte_eth_tx_burst(p->hard.port_id,
-				p->params.hard.tx_queue_id,
-				&pkts[pos],
-				(uint16_t)(pkts_len - pos));
-
-		pkts_len = 0;
-		flush_count = 0;
-	}
-
-	p->soft.def.txq_pos = txq_pos;
-	p->soft.def.pkts_len = pkts_len;
-	p->soft.def.flush_count = flush_count + 1;
-
-	return 0;
-}
-
-static __rte_always_inline int
-run_tm(struct rte_eth_dev *dev)
-{
-	struct pmd_internals *p = dev->data->dev_private;
-
-	/* Persistent context: Read Only (update not required) */
-	struct rte_sched_port *sched = p->soft.tm.sched;
-	struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
-	struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
-	uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
-	uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
-	uint16_t nb_tx_queues = dev->data->nb_tx_queues;
-
-	/* Persistent context: Read - Write (update required) */
-	uint32_t txq_pos = p->soft.tm.txq_pos;
-	uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
-	uint32_t flush_count = p->soft.tm.flush_count;
-
-	/* Not part of the persistent context */
-	uint32_t pkts_deq_len, pos;
-	uint16_t i;
-
-	/* Soft device TXQ read, TM enqueue */
-	for (i = 0; i < nb_tx_queues; i++) {
-		struct rte_ring *txq = dev->data->tx_queues[txq_pos];
-
-		/* Read TXQ burst to packet enqueue buffer */
-		pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
-			(void **)&pkts_enq[pkts_enq_len],
-			enq_bsz,
-			NULL);
-
-		/* Increment TXQ */
-		txq_pos++;
-		if (txq_pos >= nb_tx_queues)
-			txq_pos = 0;
-
-		/* TM enqueue when complete burst is available */
-		if (pkts_enq_len >= enq_bsz) {
-			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
-			pkts_enq_len = 0;
-			flush_count = 0;
-			break;
-		}
-	}
-
-	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
-		if (pkts_enq_len)
-			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);
-
-		pkts_enq_len = 0;
-		flush_count = 0;
-	}
-
-	p->soft.tm.txq_pos = txq_pos;
-	p->soft.tm.pkts_enq_len = pkts_enq_len;
-	p->soft.tm.flush_count = flush_count + 1;
-
-	/* TM dequeue, Hard device TXQ write */
-	pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);
-
-	for (pos = 0; pos < pkts_deq_len; )
-		pos += rte_eth_tx_burst(p->hard.port_id,
-			p->params.hard.tx_queue_id,
-			&pkts_deq[pos],
-			(uint16_t)(pkts_deq_len - pos));
-
-	return 0;
-}
-
 int
-rte_pmd_softnic_run(uint16_t port_id)
-{
-	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
-
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
-#endif
-
-	return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
-}
-
-static struct ether_addr eth_addr = { .addr_bytes = {0} };
-
-static uint32_t
-eth_dev_speed_max_mbps(uint32_t speed_capa)
+rte_pmd_softnic_run(uint16_t port_id __rte_unused)
 {
-	uint32_t rate_mbps[32] = {
-		ETH_SPEED_NUM_NONE,
-		ETH_SPEED_NUM_10M,
-		ETH_SPEED_NUM_10M,
-		ETH_SPEED_NUM_100M,
-		ETH_SPEED_NUM_100M,
-		ETH_SPEED_NUM_1G,
-		ETH_SPEED_NUM_2_5G,
-		ETH_SPEED_NUM_5G,
-		ETH_SPEED_NUM_10G,
-		ETH_SPEED_NUM_20G,
-		ETH_SPEED_NUM_25G,
-		ETH_SPEED_NUM_40G,
-		ETH_SPEED_NUM_50G,
-		ETH_SPEED_NUM_56G,
-		ETH_SPEED_NUM_100G,
-	};
-
-	uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
-	return rate_mbps[pos];
-}
-
-static int
-default_init(struct pmd_internals *p,
-	struct pmd_params *params,
-	int numa_node)
-{
-	p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
-		2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
-		0,
-		numa_node);
-
-	if (p->soft.def.pkts == NULL)
-		return -ENOMEM;
-
 	return 0;
 }

-static void
-default_free(struct pmd_internals *p)
-{
-	rte_free(p->soft.def.pkts);
-}
-
 static void *
-pmd_init(struct pmd_params *params, int numa_node)
+pmd_init(struct pmd_params *params)
 {
 	struct pmd_internals *p;
-	int status;

-	p = rte_zmalloc_socket(params->soft.name,
+	p = rte_zmalloc_socket(params->name,
 		sizeof(struct pmd_internals),
 		0,
-		numa_node);
+		params->cpu_id);
 	if (p == NULL)
 		return NULL;

 	memcpy(&p->params, params, sizeof(p->params));
-	rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);
-
-	/* Default */
-	status = default_init(p, params, numa_node);
-	if (status) {
-		free(p->params.hard.name);
-		rte_free(p);
-		return NULL;
-	}
-
-	/* Traffic Management (TM)*/
-	if (params->soft.flags & PMD_FEATURE_TM) {
-		status = tm_init(p, params, numa_node);
-		if (status) {
-			default_free(p);
-			free(p->params.hard.name);
-			rte_free(p);
-			return NULL;
-		}
-	}

 	return p;
 }
@@ -486,57 +220,44 @@ pmd_init(struct pmd_params *params, int numa_node)
 static void
 pmd_free(struct pmd_internals *p)
 {
-	if (p->params.soft.flags & PMD_FEATURE_TM)
-		tm_free(p);
-
-	default_free(p);
-
-	free(p->params.hard.name);
 	rte_free(p);
 }

+static struct ether_addr eth_addr = {
+	.addr_bytes = {0},
+};
+
 static int
 pmd_ethdev_register(struct rte_vdev_device *vdev,
 	struct pmd_params *params,
 	void *dev_private)
 {
-	struct rte_eth_dev_info hard_info;
-	struct rte_eth_dev *soft_dev;
-	uint32_t hard_speed;
-	int numa_node;
-	uint16_t hard_port_id;
-
-	rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
-	rte_eth_dev_info_get(hard_port_id, &hard_info);
-	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
-	numa_node = rte_eth_dev_socket_id(hard_port_id);
+	struct rte_eth_dev *dev;

 	/* Ethdev entry allocation */
-	soft_dev = rte_eth_dev_allocate(params->soft.name);
-	if (!soft_dev)
+	dev = rte_eth_dev_allocate(params->name);
+	if (!dev)
 		return -ENOMEM;

 	/* dev */
-	soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
-		NULL : /* set up later */
-		pmd_rx_pkt_burst;
-	soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
-	soft_dev->tx_pkt_prepare = NULL;
-	soft_dev->dev_ops = &pmd_ops;
-	soft_dev->device = &vdev->device;
+	dev->rx_pkt_burst = pmd_rx_pkt_burst;
+	dev->tx_pkt_burst = pmd_tx_pkt_burst;
+	dev->tx_pkt_prepare = NULL;
+	dev->dev_ops = &pmd_ops;
+	dev->device = &vdev->device;

 	/* dev->data */
-	soft_dev->data->dev_private = dev_private;
-	soft_dev->data->dev_link.link_speed = hard_speed;
-	soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	soft_dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
-	soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	soft_dev->data->mac_addrs = &eth_addr;
-	soft_dev->data->promiscuous = 1;
-	soft_dev->data->kdrv = RTE_KDRV_NONE;
-	soft_dev->data->numa_node = numa_node;
-
-	rte_eth_dev_probing_finish(soft_dev);
+	dev->data->dev_private = dev_private;
+	dev->data->dev_link.link_speed = ETH_SPEED_NUM_100G;
+	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
+	dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	dev->data->mac_addrs = &eth_addr;
+	dev->data->promiscuous = 1;
+	dev->data->kdrv = RTE_KDRV_NONE;
+	dev->data->numa_node = params->cpu_id;
+
+	rte_eth_dev_probing_finish(dev);

 	return 0;
 }
@@ -567,10 +288,10 @@ get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
 }

 static int
-pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
+pmd_parse_args(struct pmd_params *p, const char *params)
 {
 	struct rte_kvargs *kvlist;
-	int i, ret;
+	int ret = 0;

 	kvlist = rte_kvargs_parse(params, pmd_valid_args);
 	if (kvlist == NULL)
@@ -578,141 +299,21 @@ pmd_parse_args(struct pmd_params *p, const char *name, const char *params)

 	/* Set default values */
 	memset(p, 0, sizeof(*p));
-	p->soft.name = name;
-	p->soft.intrusive = INTRUSIVE;
-	p->soft.tm.rate = 0;
-	p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
-		p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
-	p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
-	p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
-	p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
-
-	/* SOFT: TM (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
-		char *s;
-
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
-			&get_string, &s);
-		if (ret < 0)
-			goto out_free;
-
-		if (strcmp(s, "on") == 0)
-			p->soft.flags |= PMD_FEATURE_TM;
-		else if (strcmp(s, "off") == 0)
-			p->soft.flags &= ~PMD_FEATURE_TM;
-		else
-			ret = -EINVAL;
-
-		free(s);
-		if (ret)
-			goto out_free;
-	}
-
-	/* SOFT: TM rate (measured in bytes/second) (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
-			&get_uint32, &p->soft.tm.rate);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	/* SOFT: TM number of queues (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
-			&get_uint32, &p->soft.tm.nb_queues);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	/* SOFT: TM queue size 0 .. 3 (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
-		uint32_t qsize;
-
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
-			&get_uint32, &qsize);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.tm.qsize[0] = (uint16_t)qsize;
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
-		uint32_t qsize;
-
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
-			&get_uint32, &qsize);
-		if (ret < 0)
-			goto out_free;
+	p->firmware = SOFTNIC_FIRMWARE;
+	p->cpu_id = SOFTNIC_CPU_ID;

-		p->soft.tm.qsize[1] = (uint16_t)qsize;
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
-		uint32_t qsize;
-
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
-			&get_uint32, &qsize);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.tm.qsize[2] = (uint16_t)qsize;
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
-		uint32_t qsize;
-
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
-			&get_uint32, &qsize);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.tm.qsize[3] = (uint16_t)qsize;
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	/* SOFT: TM enqueue burst size (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
-			&get_uint32, &p->soft.tm.enq_bsz);
-		if (ret < 0)
-			goto out_free;
-
-		p->soft.flags |= PMD_FEATURE_TM;
-	}
-
-	/* SOFT: TM dequeue burst size (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
-			&get_uint32, &p->soft.tm.deq_bsz);
+	/* Firmware script (optional) */
+	if (rte_kvargs_count(kvlist, PMD_PARAM_FIRMWARE) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_FIRMWARE,
+			&get_string, &p->firmware);
 		if (ret < 0)
 			goto out_free;
-
-		p->soft.flags |= PMD_FEATURE_TM;
 	}

-	/* HARD: name (mandatory) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
-			&get_string, &p->hard.name);
-		if (ret < 0)
-			goto out_free;
-	} else {
-		ret = -EINVAL;
-		goto out_free;
-	}
-
-	/* HARD: tx_queue_id (optional) */
-	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
-		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
-			&get_uint32, &p->hard.tx_queue_id);
+	/* CPU ID (optional) */
+	if (rte_kvargs_count(kvlist, PMD_PARAM_CPU_ID) == 1) {
+		ret = rte_kvargs_process(kvlist, PMD_PARAM_CPU_ID,
+			&get_uint32, &p->cpu_id);
 		if (ret < 0)
 			goto out_free;
 	}
@@ -727,68 +328,31 @@ pmd_probe(struct rte_vdev_device *vdev)
 {
 	struct pmd_params p;
 	const char *params;
-	int status;
+	int status = 0;

-	struct rte_eth_dev_info hard_info;
-	uint32_t hard_speed;
-	uint16_t hard_port_id;
-	int numa_node;
 	void *dev_private;
-	struct rte_eth_dev *eth_dev;
 	const char *name = rte_vdev_device_name(vdev);

 	PMD_LOG(INFO, "Probing device \"%s\"", name);

 	/* Parse input arguments */
 	params = rte_vdev_device_args(vdev);
-
-	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
-		strlen(params) == 0) {
-		eth_dev = rte_eth_dev_attach_secondary(name);
-		if (!eth_dev) {
-			PMD_LOG(ERR, "Failed to probe %s", name);
-			return -1;
-		}
-		/* TODO: request info from primary to set up Rx and Tx */
-		eth_dev->dev_ops = &pmd_ops;
-		rte_eth_dev_probing_finish(eth_dev);
-		return 0;
-	}
-
 	if (!params)
 		return -EINVAL;

-	status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
+	status = pmd_parse_args(&p, params);
 	if (status)
 		return status;

-	/* Check input arguments */
-	if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
-		return -EINVAL;
-
-	rte_eth_dev_info_get(hard_port_id, &hard_info);
-	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
-	numa_node = rte_eth_dev_socket_id(hard_port_id);
-
-	if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
-		return -EINVAL;
-
-	if (p.soft.flags & PMD_FEATURE_TM) {
-		status = tm_params_check(&p, hard_speed);
-
-		if (status)
-			return status;
-	}
+	p.name = name;

 	/* Allocate and initialize soft ethdev private data */
-	dev_private = pmd_init(&p, numa_node);
+	dev_private = pmd_init(&p);
 	if (dev_private == NULL)
 		return -ENOMEM;

 	/* Register soft ethdev */
-	PMD_LOG(INFO,
-		"Creating soft ethdev \"%s\" for hard ethdev \"%s\"",
-		p.soft.name, p.hard.name);
+	PMD_LOG(INFO, "Creating soft ethdev \"%s\"", p.name);

 	status = pmd_ethdev_register(vdev, &p, dev_private);
 	if (status) {
@@ -808,8 +372,7 @@ pmd_remove(struct rte_vdev_device *vdev)
 	if (!vdev)
 		return -EINVAL;

-	PMD_LOG(INFO, "Removing device \"%s\"",
-		rte_vdev_device_name(vdev));
+	PMD_LOG(INFO, "Removing device \"%s\"", rte_vdev_device_name(vdev));

 	/* Find the ethdev entry */
 	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
@@ -818,9 +381,9 @@ pmd_remove(struct rte_vdev_device *vdev)
 	p = dev->data->dev_private;

 	/* Free device data structures*/
-	pmd_free(p);
 	rte_free(dev->data);
 	rte_eth_dev_release_port(dev);
+	pmd_free(p);

 	return 0;
 }
@@ -832,17 +395,8 @@ static struct rte_vdev_driver pmd_softnic_drv = {

 RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);

 RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
-	PMD_PARAM_SOFT_TM "=on|off "
-	PMD_PARAM_SOFT_TM_RATE "= "
-	PMD_PARAM_SOFT_TM_NB_QUEUES "= "
-	PMD_PARAM_SOFT_TM_QSIZE0 "= "
-	PMD_PARAM_SOFT_TM_QSIZE1 "= "
-	PMD_PARAM_SOFT_TM_QSIZE2 "= "
-	PMD_PARAM_SOFT_TM_QSIZE3 "= "
-	PMD_PARAM_SOFT_TM_ENQ_BSZ "= "
-	PMD_PARAM_SOFT_TM_DEQ_BSZ "= "
-	PMD_PARAM_HARD_NAME "= "
-	PMD_PARAM_HARD_TX_QUEUE_ID "=");
+	PMD_PARAM_FIRMWARE "= "
+	PMD_PARAM_CPU_ID "=");

 RTE_INIT(pmd_softnic_init_log)
 {
diff --git a/drivers/net/softnic/rte_eth_softnic.h b/drivers/net/softnic/rte_eth_softnic.h
index 9a2c7ba9f5..fb1d170d69 100644
--- a/drivers/net/softnic/rte_eth_softnic.h
+++ b/drivers/net/softnic/rte_eth_softnic.h
@@ -11,37 +11,23 @@
 extern "C" {
 #endif

-#ifndef SOFTNIC_SOFT_TM_NB_QUEUES
-#define SOFTNIC_SOFT_TM_NB_QUEUES		65536
+/** Firmware. */
+#ifndef SOFTNIC_FIRMWARE
+#define SOFTNIC_FIRMWARE			"firmware.cli"
 #endif

-#ifndef SOFTNIC_SOFT_TM_QUEUE_SIZE
-#define SOFTNIC_SOFT_TM_QUEUE_SIZE		64
-#endif
-
-#ifndef SOFTNIC_SOFT_TM_ENQ_BSZ
-#define SOFTNIC_SOFT_TM_ENQ_BSZ			32
-#endif
-
-#ifndef SOFTNIC_SOFT_TM_DEQ_BSZ
-#define SOFTNIC_SOFT_TM_DEQ_BSZ			24
-#endif
-
-#ifndef SOFTNIC_HARD_TX_QUEUE_ID
-#define SOFTNIC_HARD_TX_QUEUE_ID		0
+/** NUMA node ID. */
+#ifndef SOFTNIC_CPU_ID
+#define SOFTNIC_CPU_ID				0
 #endif

 /**
- * Run the traffic management function on the softnic device
- *
- * This function read the packets from the softnic input queues, insert into
- * QoS scheduler queues based on mbuf sched field value and transmit the
- * scheduled packets out through the hard device interface.
+ * Soft NIC run.
  *
- * @param portid
- *   port id of the soft device.
+ * @param port_id
+ *   Port ID of the Soft NIC device.
  * @return
- *   zero.
+ *   Zero on success, error code otherwise.
  */
 int
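Note: with run_default()/run_tm() removed, rte_pmd_softnic_run() becomes
the single data path entry point, which the application is expected to
call in a loop. A minimal sketch of such a polling thread follows; the
vdev name "net_softnic0" and the thread wrapper are illustrative
assumptions, not part of this patch:

	#include <rte_ethdev.h>
	#include <rte_eth_softnic.h>

	/* Poll the Soft NIC device: each call runs one iteration of its
	 * receive/transmit data path. Assumed vdev name: "net_softnic0".
	 */
	static int
	softnic_thread(void *arg __rte_unused)
	{
		uint16_t port_id;

		/* Look up the Soft NIC port by its vdev name */
		if (rte_eth_dev_get_port_by_name("net_softnic0", &port_id))
			return -1;

		for ( ; ; )
			rte_pmd_softnic_run(port_id);

		return 0;
	}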
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index 050e3e7e19..6ae5954bf2 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -5,9 +5,11 @@
 #ifndef __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__
 #define __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__

+#include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -18,62 +20,16 @@
  * PMD Parameters
  */

-enum pmd_feature {
-	PMD_FEATURE_TM = 1, /**< Traffic Management (TM) */
-};
-
-#ifndef INTRUSIVE
-#define INTRUSIVE	0
-#endif
-
 struct pmd_params {
-	/** Parameters for the soft device (to be created) */
-	struct {
-		const char *name; /**< Name */
-		uint32_t flags; /**< Flags */
-
-		/** 0 = Access hard device though API only (potentially slower,
-		 *  but safer);
-		 *  1 = Access hard device private data structures is allowed
-		 *  (potentially faster).
-		 */
-		int intrusive;
-
-		/** Traffic Management (TM) */
-		struct {
-			uint32_t rate; /**< Rate (bytes/second) */
-			uint32_t nb_queues; /**< Number of queues */
-			uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
-			/**< Queue size per traffic class */
-			uint32_t enq_bsz; /**< Enqueue burst size */
-			uint32_t deq_bsz; /**< Dequeue burst size */
-		} tm;
-	} soft;
+	const char *name;
+	const char *firmware;
+	uint32_t cpu_id;

-	/** Parameters for the hard device (existing) */
+	/** Traffic Management (TM) */
 	struct {
-		char *name; /**< Name */
-		uint16_t tx_queue_id; /**< TX queue ID */
-	} hard;
-};
-
-/**
- * Default Internals
- */
-
-#ifndef DEFAULT_BURST_SIZE
-#define DEFAULT_BURST_SIZE	32
-#endif
-
-#ifndef FLUSH_COUNT_THRESHOLD
-#define FLUSH_COUNT_THRESHOLD	(1 << 17)
-#endif
-
-struct default_internals {
-	struct rte_mbuf **pkts;
-	uint32_t pkts_len;
-	uint32_t txq_pos;
-	uint32_t flush_count;
+		uint32_t n_queues; /**< Number of queues */
+		uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+	} tm;
 };

@@ -185,14 +141,7 @@ struct tm_internals {
 	/** Blueprints */
 	struct tm_params params;
-
-	/** Run-time */
 	struct rte_sched_port *sched;
-	struct rte_mbuf **pkts_enq;
-	struct rte_mbuf **pkts_deq;
-	uint32_t pkts_enq_len;
-	uint32_t txq_pos;
-	uint32_t flush_count;
 };

@@ -204,22 +153,8 @@ struct pmd_internals {

 	/** Soft device */
 	struct {
-		struct default_internals def; /**< Default */
 		struct tm_internals tm; /**< Traffic Management */
 	} soft;
-
-	/** Hard device */
-	struct {
-		uint16_t port_id;
-	} hard;
-};
-
-struct pmd_rx_queue {
-	/** Hard device */
-	struct {
-		uint16_t port_id;
-		uint16_t rx_queue_id;
-	} hard;
 };

@@ -227,9 +162,6 @@ struct pmd_rx_queue {
  */
 extern const struct rte_tm_ops pmd_tm_ops;

-int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate);
-
 int
 tm_init(struct pmd_internals *p, struct pmd_params *params, int numa_node);

@@ -243,20 +175,9 @@ void
 tm_stop(struct pmd_internals *p);

 static inline int
-tm_enabled(struct rte_eth_dev *dev)
+tm_used(struct rte_eth_dev *dev __rte_unused)
 {
-	struct pmd_internals *p = dev->data->dev_private;
-
-	return (p->params.soft.flags & PMD_FEATURE_TM);
-}
-
-static inline int
-tm_used(struct rte_eth_dev *dev)
-{
-	struct pmd_internals *p = dev->data->dev_private;
-
-	return (p->params.soft.flags & PMD_FEATURE_TM) &&
-		p->soft.tm.h.n_tm_nodes[TM_NODE_LEVEL_PORT];
+	return 0;
 }

 #endif /* __INCLUDE_RTE_ETH_SOFTNIC_INTERNALS_H__ */
diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c
index 11d638a98a..8da8310e2b 100644
--- a/drivers/net/softnic/rte_eth_softnic_tm.c
+++ b/drivers/net/softnic/rte_eth_softnic_tm.c
@@ -15,50 +15,6 @@
 #define SUBPORT_TC_PERIOD	10
 #define PIPE_TC_PERIOD		40

-int
-tm_params_check(struct pmd_params *params, uint32_t hard_rate)
-{
-	uint64_t hard_rate_bytes_per_sec = (uint64_t)hard_rate * BYTES_IN_MBPS;
-	uint32_t i;
-
-	/* rate */
-	if (params->soft.tm.rate) {
-		if (params->soft.tm.rate > hard_rate_bytes_per_sec)
-			return -EINVAL;
-	} else {
-		params->soft.tm.rate =
-			(hard_rate_bytes_per_sec > UINT32_MAX) ?
-				UINT32_MAX : hard_rate_bytes_per_sec;
-	}
-
-	/* nb_queues */
-	if (params->soft.tm.nb_queues == 0)
-		return -EINVAL;
-
-	if (params->soft.tm.nb_queues < RTE_SCHED_QUEUES_PER_PIPE)
-		params->soft.tm.nb_queues = RTE_SCHED_QUEUES_PER_PIPE;
-
-	params->soft.tm.nb_queues =
-		rte_align32pow2(params->soft.tm.nb_queues);
-
-	/* qsize */
-	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-		if (params->soft.tm.qsize[i] == 0)
-			return -EINVAL;
-
-		params->soft.tm.qsize[i] =
-			rte_align32pow2(params->soft.tm.qsize[i]);
-	}
-
-	/* enq_bsz, deq_bsz */
-	if (params->soft.tm.enq_bsz == 0 ||
-		params->soft.tm.deq_bsz == 0 ||
-		params->soft.tm.deq_bsz >= params->soft.tm.enq_bsz)
-		return -EINVAL;
-
-	return 0;
-}
-
 static void
 tm_hierarchy_init(struct pmd_internals *p)
 {
@@ -134,30 +90,9 @@ tm_hierarchy_uninit(struct pmd_internals *p)

 int
 tm_init(struct pmd_internals *p,
-	struct pmd_params *params,
-	int numa_node)
+	struct pmd_params *params __rte_unused,
+	int numa_node __rte_unused)
 {
-	uint32_t enq_bsz = params->soft.tm.enq_bsz;
-	uint32_t deq_bsz = params->soft.tm.deq_bsz;
-
-	p->soft.tm.pkts_enq = rte_zmalloc_socket(params->soft.name,
-		2 * enq_bsz * sizeof(struct rte_mbuf *),
-		0,
-		numa_node);
-
-	if (p->soft.tm.pkts_enq == NULL)
-		return -ENOMEM;
-
-	p->soft.tm.pkts_deq = rte_zmalloc_socket(params->soft.name,
-		deq_bsz * sizeof(struct rte_mbuf *),
-		0,
-		numa_node);
-
-	if (p->soft.tm.pkts_deq == NULL) {
-		rte_free(p->soft.tm.pkts_enq);
-		return -ENOMEM;
-	}
-
 	tm_hierarchy_init(p);

 	return 0;
@@ -167,8 +102,6 @@ void
 tm_free(struct pmd_internals *p)
 {
 	tm_hierarchy_uninit(p);
-	rte_free(p->soft.tm.pkts_enq);
-	rte_free(p->soft.tm.pkts_deq);
 }

 int
@@ -384,7 +317,7 @@ static uint32_t
 tm_level_get_max_nodes(struct rte_eth_dev *dev, enum tm_node_level level)
 {
 	struct pmd_internals *p = dev->data->dev_private;
-	uint32_t n_queues_max = p->params.soft.tm.nb_queues;
+	uint32_t n_queues_max = p->params.tm.n_queues;
 	uint32_t n_tc_max = n_queues_max / RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS;
 	uint32_t n_pipes_max = n_tc_max / RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE;
 	uint32_t n_subports_max = n_pipes_max;
@@ -429,7 +362,7 @@ pmd_tm_node_type_get(struct rte_eth_dev *dev,
 			NULL,
 			rte_strerror(EINVAL));

-	*is_leaf = node_id < p->params.soft.tm.nb_queues;
+	*is_leaf = node_id < p->params.tm.n_queues;

 	return 0;
 }
@@ -1362,7 +1295,7 @@ node_add_check_port(struct rte_eth_dev *dev,
 		params->shaper_profile_id);

 	/* node type: non-leaf */
-	if (node_id < p->params.soft.tm.nb_queues)
+	if (node_id < p->params.tm.n_queues)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1385,12 +1318,9 @@ node_add_check_port(struct rte_eth_dev *dev,
 			NULL,
 			rte_strerror(EINVAL));

-	/* Shaper must be valid.
-	 * Shaper profile peak rate must fit the configured port rate.
-	 */
+	/* Shaper must be valid */
 	if (params->shaper_profile_id == RTE_TM_SHAPER_PROFILE_ID_NONE ||
-		sp == NULL ||
-		sp->params.peak.rate > p->params.soft.tm.rate)
+		sp == NULL)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_PARAMS_SHAPER_PROFILE_ID,
@@ -1437,7 +1367,7 @@ node_add_check_subport(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;

 	/* node type: non-leaf */
-	if (node_id < p->params.soft.tm.nb_queues)
+	if (node_id < p->params.tm.n_queues)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1509,7 +1439,7 @@ node_add_check_pipe(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;

 	/* node type: non-leaf */
-	if (node_id < p->params.soft.tm.nb_queues)
+	if (node_id < p->params.tm.n_queues)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1586,7 +1516,7 @@ node_add_check_tc(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;

 	/* node type: non-leaf */
-	if (node_id < p->params.soft.tm.nb_queues)
+	if (node_id < p->params.tm.n_queues)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_ID,
@@ -1659,7 +1589,7 @@ node_add_check_queue(struct rte_eth_dev *dev,
 	struct pmd_internals *p = dev->data->dev_private;

 	/* node type: leaf */
-	if (node_id >= p->params.soft.tm.nb_queues)
+	if (node_id >= p->params.tm.n_queues)
 		return -rte_tm_error_set(error,
 			EINVAL,
 			RTE_TM_ERROR_TYPE_NODE_ID,
@@ -2548,10 +2478,10 @@ hierarchy_blueprints_create(struct rte_eth_dev *dev)
 		.n_subports_per_port = root->n_children,
 		.n_pipes_per_subport = h->n_tm_nodes[TM_NODE_LEVEL_PIPE] /
 			h->n_tm_nodes[TM_NODE_LEVEL_SUBPORT],
-		.qsize = {p->params.soft.tm.qsize[0],
-			p->params.soft.tm.qsize[1],
-			p->params.soft.tm.qsize[2],
-			p->params.soft.tm.qsize[3],
+		.qsize = {p->params.tm.qsize[0],
+			p->params.tm.qsize[1],
+			p->params.tm.qsize[2],
+			p->params.tm.qsize[3],
 		},
 		.pipe_profiles = t->pipe_profiles,
 		.n_pipe_profiles = t->n_pipe_profiles,